diff -Nru python-apsw-3.39.2.0/apsw/ext.py python-apsw-3.40.0.0/apsw/ext.py
--- python-apsw-3.39.2.0/apsw/ext.py	1970-01-01 00:00:00.000000000 +0000
+++ python-apsw-3.40.0.0/apsw/ext.py	2022-11-27 06:12:04.000000000 +0000
@@ -0,0 +1,495 @@
+# Provides various useful routines
+
+from __future__ import annotations
+import collections, collections.abc
+import sys
+if sys.version_info >= (3, 10):
+    from types import NoneType
+else:
+    NoneType = type(None)
+
+from dataclasses import dataclass, make_dataclass
+
+from typing import Optional, Tuple, Union, List, Any, Dict, Callable, Sequence
+import functools
+import abc
+
+import apsw
+
+try:
+    from keyword import iskeyword as _iskeyword
+except ImportError:
+    # From https://docs.python.org/3/reference/lexical_analysis.html#keywords
+    _keywords = set("""
+    False await else import pass
+    None break except in raise
+    True class finally is return
+    and continue for lambda try
+    as def from nonlocal while
+    assert del global not with
+    async elif if or yield
+    """.split())
+
+    def _iskeyword(s: str) -> bool:
+        return s in _keywords
+
+
+class DataClassRowFactory:
+    """Returns each row as a :mod:`dataclass `, accessible by column name.
+
+    To use, set an instance as :attr:`Connection.rowtrace
+    ` to affect all :class:`cursors
+    `, or on a specific cursor::
+
+        connection.rowtrace = apsw.ext.DataClassRowFactory()
+        for row in connection.execute("SELECT title, sum(orders) AS total, ..."):
+            # You can now access by name
+            print (row.title, row.total)
+            # you can get the underlying description
+            print (row.__description__)
+
+    You can use as many instances of this class as you want, each across as many
+    :class:`connections ` as you want.
+
+    :param rename: Column names could be duplicated, or not
+        valid in Python (eg a column named `continue`).
+        If `rename` is True, then invalid/duplicate names are replaced
+        with `_` and their position starting at zero. For example `title,
+        total, title, continue` would become `title, total, _2, _3`. If
+        `rename` is False then problem column names will result in
+        :exc:`TypeError` raised by :func:`dataclasses.make_dataclass`
+
+    :param dataclass_kwargs: Additional parameters when creating the dataclass
+        as described in :func:`dataclasses.dataclass`. For example you may
+        want `frozen = True` to make the dataclass read-only, or `slots = True`
+        to reduce memory consumption.
+    """
+
+    def __init__(self, *, rename: bool = True, dataclass_kwargs: Optional[Dict[str, Any]] = None):
+        self.dataclass_kwargs = dataclass_kwargs or {}
+        self.rename = rename
+
+    @functools.lru_cache(maxsize=16)
+    def get_dataclass(self, description: Tuple[Tuple[str, str], ...]) -> Tuple[Any, Tuple[str, ...]]:
+        """Returns dataclass and tuple of (potentially renamed) column names
+
+        The dataclass is what is returned for each row with that
+        :meth:`description `
+
+        This method caches its results.
+ """ + names = [d[0] for d in description] + if self.rename: + new_names: List[str] = [] + for i, n in enumerate(names): + if n.isidentifier() and not _iskeyword(n) and n not in new_names: + new_names.append(n) + else: + new_names.append(f"_{ i }") + names = new_names + types = [self.get_type(d[1]) for d in description] + + kwargs = self.dataclass_kwargs.copy() + if "namespace" not in kwargs: + kwargs["namespace"] = {} + kwargs["namespace"]["__description__"] = description + + # some magic to make the reported classnames different + suffix = (".%06X" % hash(repr(description)))[:7] + + return make_dataclass(f"{ self.__class__.__name__ }{ suffix }", zip(names, types), **kwargs), tuple(names) + + def get_type(self, t: Optional[str]) -> Any: + """Returns the `type hint `__ to use in the dataclass based on the type in the :meth:`description ` + + `SQLite's affinity rules `__ are followed. + + The values have no effect on how your program runs, but can be used by tools like + mypy. Column information like whether `null` is allowed is not present, so + this is just a hint. + """ + if not t: + return Any + # From 3.1 https://www.sqlite.org/datatype3.html + t = t.upper() + if "INT" in t: + return int + if "CHAR" in t or "CLOB" in t or "TEXT" in t: + return str + if "BLOB" in t: + return bytes + if "REAL" in t or "FLOA" in t or "DOUB" in t: + return float + return Union[float, int] + + def __call__(self, cursor: apsw.Cursor, row: apsw.SQLiteValues) -> Any: + """What the row tracer calls + + This :meth:`looks up ` the dataclass and column + names, and then returns an instance of the dataclass. + """ + dc, column_names = self.get_dataclass(cursor.getdescription()) + return dc(**dict(zip(column_names, row))) + + +class SQLiteTypeAdapter(abc.ABC): + """A metaclass to indicate conversion to SQLite types is supported + + This is one way to indicate your type supports conversion to a + value supported by SQLite. You can either inherit from this class, + or call the register method:: + + apsw.ext.SQLiteTypeAdapter.register(YourClassHere) + + Doing either is entirely sufficient and there is no need to + register with :class:`TypesConverterCursorFactory` + """ + + @abc.abstractmethod + def to_sqlite_value(self) -> apsw.SQLiteValue: + "Return a SQLite compatible value for this object" + raise NotImplementedError + + +class TypesConverterCursorFactory: + """Provides cursors that can convert objects into one of the types supported by SQLite. 
or back from SQLite + + :param metaclass: Which metaclass to consider as conversion capable + """ + + def __init__(self, abstract_base_class: abc.ABCMeta = SQLiteTypeAdapter): + self.abstract_base_class = abstract_base_class + # to sqlite value + self.adapters: Dict[type, Callable[[Any], apsw.SQLiteValue]] = {} + # from sqlite value + self.converters: Dict[str, Callable[[apsw.SQLiteValue], Any]] = {} + + def register_adapter(self, klass: type, callable: Callable[[Any], apsw.SQLiteValue]) -> None: + """Registers a callable that converts from `klass` to one of the supported SQLite types""" + self.adapters[klass] = callable + + def register_converter(self, name: str, callable: Callable[[apsw.SQLiteValue], Any]) -> None: + """Registers a callable that converts from a SQLite value""" + self.converters[name] = callable + + def __call__(self, connection: apsw.Connection) -> TypeConverterCursor: + "Returns a new :class:`cursor ` for the `connection`" + return TypesConverterCursorFactory.TypeConverterCursor(connection, self) + + def adapt_value(self, value: Any) -> apsw.SQLiteValue: + "Returns SQLite representation of `value`" + if isinstance(value, (int, bytes, str, NoneType, float)): + return value + if isinstance(value, self.abstract_base_class): + return value.to_sqlite_value() + adapter = self.adapters.get(type(value)) + if not adapter: + raise TypeError(f"No adapter registered for type { type(value) }") + return adapter(value) + + def convert_value(self, schematype: str, value: apsw.SQLiteValue) -> Any: + "Returns Python object from schema type and SQLite value" + converter = self.converters.get(schematype) + if not converter: + return value + return converter(value) + + def wrap_bindings(self, bindings: Optional[apsw.Bindings]) -> Optional[apsw.Bindings]: + "Wraps bindings that are supplied to underlying execute" + if bindings is None: + return None + if isinstance(bindings, (dict, collections.abc.Mapping)): + return TypesConverterCursorFactory.DictAdapter(self, bindings) + # turn into a list since PySequence_Fast does that anyway + return [self.adapt_value(v) for v in bindings] + + def wrap_sequence_bindings(self, sequenceofbindings: Sequence[apsw.Bindings]): + for binding in sequenceofbindings: + yield self.wrap_bindings(binding) + + class DictAdapter(collections.abc.Mapping): + "Used to wrap dictionaries supplied as bindings" + + def __init__(self, factory: TypesConverterCursorFactory, data: collections.abc.Mapping[str, apsw.SQLiteValue]): + self.data = data + self.factory = factory + + def __getitem__(self, key: str) -> apsw.SQLiteValue: + return self.factory.adapt_value(self.data[key]) + + def __iter__(self): + "Required by mapping, but not used" + raise NotImplementedError + + def __len__(self): + "Required by mapping, but not used" + raise NotImplementedError + + class TypeConverterCursor(apsw.Cursor): + "Cursor used to do conversions" + + def __init__(self, connection: apsw.Connection, factory: TypesConverterCursorFactory): + super().__init__(connection) + self.factory = factory + self.rowtrace = self._rowtracer + + def _rowtracer(self, cursor: apsw.Cursor, values: apsw.SQLiteValues) -> Tuple[Any, ...]: + return tuple(self.factory.convert_value(d[1], v) for d, v in zip(cursor.getdescription(), values)) + + def execute(self, + statements: str, + bindings: Optional[apsw.Bindings] = None, + *, + can_cache: bool = True, + prepare_flags: int = 0) -> apsw.Cursor: + """Executes the statements doing conversions on supplied and returned values + + See :meth:`apsw.Cursor.execute` for parameter 
details""" + return super().execute(statements, + self.factory.wrap_bindings(bindings), + can_cache=can_cache, + prepare_flags=prepare_flags) + + def executemany(self, + statements: str, + sequenceofbindings: Sequence[apsw.Bindings], + *, + can_cache: bool = True, + prepare_flags: int = 0) -> apsw.Cursor: + """Executes the statements against each item in sequenceofbindings, doing conversions on supplied and returned values + + See :meth:`apsw.Cursor.executemany` for parameter details""" + return super().executemany(statements, + self.factory.wrap_sequence_bindings(sequenceofbindings), + can_cache=can_cache, + prepare_flags=prepare_flags) + + +def query_info(db: apsw.Connection, + query: str, + bindings: Optional[apsw.Bindings] = None, + *, + prepare_flags: int = 0, + actions: bool = False, + expanded_sql: bool = False, + explain: bool = False, + explain_query_plan: bool = False) -> QueryDetails: + """Returns information about the query, but does not run it. + + Set the various parameters to `True` if you also want the + actions, expanded_sql, explain, query_plan etc filled in. + """ + res: dict[str, Any] = {"actions": None, "query_plan": None, "explain": None} + + def tracer(cursor: apsw.Cursor, first_query: str, bindings: Optional[apsw.Bindings]): + nonlocal res + res.update({ + "first_query": first_query, + "query": query, + "bindings": bindings, + "is_explain": cursor.is_explain, + "is_readonly": cursor.is_readonly, + "description": cursor.getdescription(), + "description_full": None, + }) + if hasattr(cursor, "description_full"): + res["description_full"] = cursor.description_full + + assert query == first_query or query.startswith(first_query) + res["query_remaining"] = query[len(first_query):] if len(query) > len(first_query) else None + res["expanded_sql"] = cursor.expanded_sql if expanded_sql else None + return False + + actions_taken = [] + + def auther(code, third, fourth, dbname, trigview): + a = {"action": code, "action_name": apsw.mapping_authorizer_function[code]} + if dbname: + a["database_name"] = dbname + if trigview: + a["trigger_or_view"] = trigview + + # this block corresponds to the table at https://sqlite.org/c3ref/c_alter_table.html + for op, thirdname, fourthname in ( + (apsw.SQLITE_CREATE_INDEX, "index_name", "table_name"), + (apsw.SQLITE_CREATE_TABLE, "table_name", None), + (apsw.SQLITE_CREATE_TEMP_INDEX, "index_name", "table_name"), + (apsw.SQLITE_CREATE_TEMP_TABLE, "table_name", None), + (apsw.SQLITE_CREATE_TEMP_TRIGGER, "trigger_name", "table_name"), + (apsw.SQLITE_CREATE_TEMP_VIEW, "view_name", None), + (apsw.SQLITE_CREATE_TRIGGER, "trigger_name", "table_name"), + (apsw.SQLITE_CREATE_VIEW, "view_name", None), + (apsw.SQLITE_DELETE, "table_name", None), + (apsw.SQLITE_DROP_INDEX, "index_name", "table_name"), + (apsw.SQLITE_DROP_TABLE, "table_name", None), + (apsw.SQLITE_DROP_TEMP_INDEX, "index_name", "table_name"), + (apsw.SQLITE_DROP_TEMP_TABLE, "table_name", None), + (apsw.SQLITE_DROP_TEMP_TRIGGER, "trigger_name", "table_name"), + (apsw.SQLITE_DROP_TEMP_VIEW, "view_name", None), + (apsw.SQLITE_DROP_TRIGGER, "trigger_name", "table_name"), + (apsw.SQLITE_DROP_VIEW, "view_name", None), + (apsw.SQLITE_INSERT, "table_name", None), + (apsw.SQLITE_PRAGMA, "pragma_name", "pragma_value"), + (apsw.SQLITE_READ, "table_name", "column_name"), + (apsw.SQLITE_SELECT, None, None), + (apsw.SQLITE_TRANSACTION, "operation", None), + (apsw.SQLITE_UPDATE, "table_name", "column_name"), + (apsw.SQLITE_ATTACH, "file_name", None), + (apsw.SQLITE_DETACH, "database_name", None), + 
(apsw.SQLITE_ALTER_TABLE, "database_name", "table_name"), + (apsw.SQLITE_REINDEX, "index_name", None), + (apsw.SQLITE_ANALYZE, "table_name", None), + (apsw.SQLITE_CREATE_VTABLE, "table_name", "module_name"), + (apsw.SQLITE_DROP_VTABLE, "table_name", "module_name"), + (apsw.SQLITE_FUNCTION, None, "function_name"), + (apsw.SQLITE_SAVEPOINT, "operation", None), + (apsw.SQLITE_RECURSIVE, None, None), + ): + if code == op: + if thirdname is not None: + a[thirdname] = third + if fourthname is not None: + a[fourthname] = fourth + break + else: + raise ValueError(f"Unknown authorizer code { code }") + actions_taken.append(QueryAction(**a)) + return apsw.SQLITE_OK + + cur = db.cursor() + cur.exectrace = tracer + if actions: + orig_authorizer = db.authorizer + db.authorizer = auther + try: + cur.execute(query, bindings, can_cache=False, prepare_flags=prepare_flags) + except apsw.ExecTraceAbort: + pass + finally: + if actions: + db.authorizer = orig_authorizer + cur.exectrace = None + if actions: + res["actions"] = actions_taken + + if explain and not res["is_explain"]: + vdbe = [] + for row in cur.execute("EXPLAIN " + res["first_query"], bindings): + vdbe.append( + VDBEInstruction(**dict((v[0][0], v[1]) for v in zip(cur.getdescription(), row) if v[1] is not None))) + res["explain"] = vdbe + + if explain_query_plan and not res["is_explain"]: + subn = "sub" + byid = {0: {"detail": "QUERY PLAN"}} + + for row in cur.execute("EXPLAIN QUERY PLAN " + res["first_query"], bindings): + node = dict((v[0][0], v[1]) for v in zip(cur.getdescription(), row) if v[0][0] != "notused") + assert len(node) == 3 # catch changes in returned format + parent = byid[node["parent"]] + if subn not in parent: + parent[subn] = [node] + else: + parent[subn].append(node) + byid[node["id"]] = node + + def flatten(node): + res = {"detail": node["detail"]} + if subn in node: + res[subn] = [QueryPlan(**flatten(child)) for child in node[subn]] + return res + + res["query_plan"] = QueryPlan(**flatten(byid[0])) + + return QueryDetails(**res) + + +@dataclass +class QueryDetails: + "A :mod:`dataclass ` that provides detailed information about a query, returned by :func:`query_info`" + query: str + "Original query provided" + bindings: Optional[apsw.Bindings] + "Bindings provided" + first_query: str + "The first statement present in query" + query_remaining: Optional[str] + "Query text after the first one if multiple were in query, else None" + is_explain: int + ":attr:`Cursor.is_explain `" + is_readonly: bool + ":attr:`Cursor.is_readonly `" + description: Tuple[Tuple[str, str], ...] + ":meth:`Cursor.getdescription `" + description_full: Optional[Tuple[Tuple[str, str, str, str, str], ...]] + ":attr:`Cursor.description_full `" + expanded_sql: Optional[str] + ":attr:`Cursor.expanded_sql `" + actions: Optional[List[QueryAction]] + """A list of the actions taken by the query, as discovered via + :attr:`Connection.authorizer `""" + explain: Optional[List[VDBEInstruction]] + """A list of instructions of the `internal code `__ + used by SQLite to execute the query""" + query_plan: Optional[QueryPlan] + """The steps taken against tables and indices `described here `__""" + + +@dataclass +class QueryAction: + """A :mod:`dataclass ` that provides information about one action taken by a query + + Depending on the action, only a subset of the fields will have non-None values""" + action: int + """`Authorizer code `__ (also present + in :attr:`apsw.mapping_authorizer_function`)""" + action_name: str + """The string corresponding to the action. 
For example `action` could be `21` in which + case `action_name` will be `SQLITE_SELECT`""" + + column_name: Optional[str] = None + database_name: Optional[str] = None + "eg `main`, `temp`, the name in `ATTACH `__" + file_name: Optional[str] = None + function_name: Optional[str] = None + module_name: Optional[str] = None + operation: Optional[str] = None + pragma_name: Optional[str] = None + pragma_value: Optional[str] = None + table_name: Optional[str] = None + trigger_name: Optional[str] = None + trigger_or_view: Optional[str] = None + """This action is happening due to a trigger or view, and not + directly expressed in the query itself""" + view_name: Optional[str] = None + + +@dataclass +class QueryPlan: + "A :mod:`dataclass ` for one step of a query plan" + detail: str + "Description of this step" + sub: Optional[List[QueryPlan]] = None + "Steps that run within this one" + + +@dataclass +class VDBEInstruction: + "A :mod:`dataclass ` representing one instruction and its parameters" + addr: int + "Address of this opcode. It will be the target of goto, loops etc" + opcode: str + "The instruction" + comment: Optional[str] = None + "Additional human readable information" + p1: Optional[int] = None + "First opcode parameter" + p2: Optional[int] = None + "Second opcode parameter" + p3: Optional[int] = None + "Third opcode parameter" + p4: Optional[int] = None + "Fourth opcode parameter" + p5: Optional[int] = None + "Fifth opcode parameter" diff -Nru python-apsw-3.39.2.0/apsw/__init__.pyi python-apsw-3.40.0.0/apsw/__init__.pyi --- python-apsw-3.39.2.0/apsw/__init__.pyi 2022-07-28 14:21:36.000000000 +0000 +++ python-apsw-3.40.0.0/apsw/__init__.pyi 2022-11-27 06:14:28.000000000 +0000 @@ -1,26 +1,34 @@ # This file is generated by rst2docstring +import sys + from typing import Union, Tuple, List, Optional, Callable, Any, Dict, \ Iterator, Sequence, Literal, Set -from array import array -from types import TracebackType +from collections.abc import Mapping +import array +import types + +if sys.version_info >= (3, 8): + from typing import Protocol SQLiteValue = Union[None, int, float, bytes, str] """SQLite supports 5 types - None (NULL), 64 bit signed int, 64 bit -float, bytes, and unicode text""" +float, bytes, and str (unicode text)""" SQLiteValues = Union[Tuple[()], Tuple[SQLiteValue, ...]] "A sequence of zero or more SQLiteValue" -Bindings = Union[Sequence[Union[SQLiteValue, zeroblob]], Dict[str, Union[SQLiteValue, zeroblob]]] +Bindings = Union[Sequence[Union[SQLiteValue, zeroblob]], Mapping[str, Union[SQLiteValue, zeroblob]]] """Query bindings are either a sequence of SQLiteValue, or a dict mapping names -to SQLiteValues. You can also provide zeroblob in Bindings.""" +to SQLiteValues. You can also provide zeroblob in Bindings. 
You can use
+dict subclasses or any type registered with :class:`collections.abc.Mapping`
+for named bindings"""
 
 # Neither TypeVar nor ParamSpec work, when either should
 AggregateT = Any
 "An object provided as first parameter of step and final aggregate functions"
 
-AggregateStep = Union[
+AggregateStep = Union [
     Callable[[AggregateT], None],
     Callable[[AggregateT, SQLiteValue], None],
     Callable[[AggregateT, SQLiteValue, SQLiteValue], None],
@@ -38,7 +46,7 @@
 """Called each time for the start of a new calculation using an aggregate function,
 returning an object, a step function and a final function"""
 
-ScalarProtocol = Union[
+ScalarProtocol = Union [
     Callable[[], SQLiteValue],
     Callable[[SQLiteValue], SQLiteValue],
     Callable[[SQLiteValue, SQLiteValue], SQLiteValue],
@@ -60,73 +68,107 @@
 """Execution tracers are called with the cursor, sql query text, and the bindings
 used. Return False/None to abort execution, or True to continue"""
 
+Authorizer = Callable[[int, Optional[str], Optional[str], Optional[str], Optional[str]], int]
+"""Authorizers are called with an operation code and 4 strings (which could be None) depending
+on the operation. Return SQLITE_OK, SQLITE_DENY, or SQLITE_IGNORE"""
+
+CommitHook = Callable[[], bool]
+"""Commit hook is called with no arguments and should return True to abort the commit and False
+to let it continue"""
 
 SQLITE_VERSION_NUMBER: int
-def apswversion() -> str:
+"""The integer version number of SQLite that APSW was compiled
+against. For example SQLite 3.6.4 will have the value *3006004*.
+This number may be different than the actual library in use if the
+library is shared and has been updated. Call
+:meth:`sqlitelibversion` to get the actual library version."""
+
+def apswversion() -> str:
     """Returns the APSW version."""
     ...
 
 compile_options: Tuple[str, ...]
-def complete(statement: str) -> bool:
+"""A tuple of the options used to compile SQLite. For example it
+will be something like this::
+
+    ('ENABLE_LOCKING_STYLE=0', 'TEMP_STORE=1', 'THREADSAFE=1')
+
+Calls: `sqlite3_compileoption_get `__"""
+
+def complete(statement: str) -> bool:
     """Returns True if the input string comprises one or more complete SQL statements
     by looking for an unquoted trailing semi-colon.
-    
+
     An example use would be if you were prompting the user for SQL
     statements and needed to know if you had a whole statement, or
     needed to ask for another line::
-    
+
         statement = input("SQL> ")
         while not apsw.complete(statement):
             more = input("  .. ")
             statement = statement + "\\n" + more
-    
+
     Calls: `sqlite3_complete `__"""
     ...
 
-def config(op: int, *args: Any) -> None:
+def config(op: int, *args: Any) -> None:
     """:param op: A `configuration operation `_
     :param args: Zero or more arguments as appropriate for *op*
-    
+
     Many operations don't make sense from a Python program. The
     following configuration operations are supported: SQLITE_CONFIG_LOG,
     SQLITE_CONFIG_SINGLETHREAD, SQLITE_CONFIG_MULTITHREAD,
     SQLITE_CONFIG_SERIALIZED, SQLITE_CONFIG_URI, SQLITE_CONFIG_MEMSTATUS,
     SQLITE_CONFIG_COVERING_INDEX_SCAN, SQLITE_CONFIG_PCACHE_HDRSZ,
     SQLITE_CONFIG_PMASZ, and SQLITE_CONFIG_STMTJRNL_SPILL.
-    
+
     See :ref:`tips ` for an example of how to receive log messages (SQLITE_CONFIG_LOG)
-    
+
     Calls: `sqlite3_config `__"""
     ...
 
 connection_hooks: List[Callable[[Connection], None]]
-def enablesharedcache(enable: bool) -> None:
+"""The purpose of the hooks is to allow the easy registration of
+:meth:`functions `,
+:ref:`virtual tables ` or similar items with
+each :class:`Connection` as it is created.
The default value is an empty +list. Whenever a Connection is created, each item in +apsw.connection_hooks is invoked with a single parameter being +the new Connection object. If the hook raises an exception then +the creation of the Connection fails. + +If you wanted to store your own defined functions in the +database then you could define a hook that looked in the +relevant tables, got the Python text and turned it into the +functions.""" + +def enablesharedcache(enable: bool) -> None: """If you use the same :class:`Connection` across threads or use multiple :class:`connections ` accessing the same file, then SQLite can `share the cache between them `_. It is :ref:`not recommended ` that you use this. - + Calls: `sqlite3_enable_shared_cache `__""" ... -def exceptionfor(code: int) -> Exception: +def exceptionfor(code: int) -> Exception: """If you would like to raise an exception that corresponds to a particular SQLite `error code `_ then call this function. It also understands `extended error codes `_. - + For example to raise `SQLITE_IOERR_ACCESS `_:: - + raise apsw.exceptionfor(apsw.SQLITE_IOERR_ACCESS)""" ... -def fork_checker() -> None: +def fork_checker() -> None: """**Note** This method is not available on Windows as it does not support the fork system call. - + SQLite does not allow the use of database connections across `forked `__ processes (see the `SQLite FAQ Q6 `__). @@ -135,12 +177,12 @@ do this to SQLite then parent and child would both consider themselves owners of open databases and silently corrupt each other's work and interfere with each other's locks.) - + One example of how you may end up using fork is if you use the `multiprocessing module `__ which uses fork to make child processes. - + If you do use fork or multiprocessing on a platform that supports fork then you **must** ensure database connections and their objects (cursors, backup, blobs etc) are not used in the parent process, or @@ -150,7 +192,7 @@ are closed. It is also a good idea to call `gc.collect(2) `__ to ensure anything you may have missed is also deallocated.) - + Once you run this method, extra checking code is inserted into SQLite's mutex operations (at a very small performance penalty) that verifies objects are not used across processes. You will get a @@ -160,7 +202,7 @@ may be reported by Python after the line where the issue actually arose. (Destructors of objects you didn't close also run between lines.) - + You should only call this method as the first line after importing APSW, as it has to shutdown and re-initialize SQLite. If you have any SQLite objects already allocated when calling the method then @@ -168,674 +210,3509 @@ checking as part of your test suite.""" ... -def format_sql_value(value: SQLiteValue) -> str: +def format_sql_value(value: SQLiteValue) -> str: """Returns a Python string representing the supplied value in SQL syntax.""" ... -def initialize() -> None: +def initialize() -> None: """It is unlikely you will want to call this method as SQLite automatically initializes. - + Calls: `sqlite3_initialize `__""" ... keywords: Set[str] -def log(errorcode: int, message: str) -> None: +"""A set containing every SQLite keyword + +Calls: + * `sqlite3_keyword_count `__ + * `sqlite3_keyword_name `__""" + +def log(errorcode: int, message: str) -> None: """Calls the SQLite logging interface. 
Note that you must format the message before passing it to this method:: - + apsw.log(apsw.SQLITE_NOMEM, f"Need { needed } bytes of memory") - + See :ref:`tips ` for an example of how to receive log messages. - - Calls: `sqlite3_log `__""" - ... -def main() -> None: - """Call this to run the :ref:`interactive shell `. It - automatically passes in sys.argv[1:] and exits Python when done.""" + Calls: `sqlite3_log `__""" ... -def memoryhighwater(reset: bool = False) -> int: +def memoryhighwater(reset: bool = False) -> int: """Returns the maximum amount of memory SQLite has used. If *reset* is True then the high water mark is reset to the current value. - + .. seealso:: - + :meth:`status` - + Calls: `sqlite3_memory_highwater `__""" ... -def memoryused() -> int: +def memoryused() -> int: """Returns the amount of memory SQLite is currently using. - + .. seealso:: :meth:`status` - + + Calls: `sqlite3_memory_used `__""" ... -def randomness(amount: int) -> bytes: +def randomness(amount: int) -> bytes: """Gets random data from SQLite's random number generator. - + :param amount: How many bytes to return - + Calls: `sqlite3_randomness `__""" ... -def releasememory(amount: int) -> int: +def releasememory(amount: int) -> int: """Requests SQLite try to free *amount* bytes of memory. Returns how many bytes were freed. - + Calls: `sqlite3_release_memory `__""" ... -def shutdown() -> None: +def shutdown() -> None: """It is unlikely you will want to call this method and there is no need to do so. It is a **really** bad idea to call it unless you are absolutely sure all :class:`connections `, - :class:`blobs `, :class:`cursors `, :class:`vfs ` + :class:`blobs `, :class:`cursors `, :class:`vfs ` etc have been closed, deleted and garbage collected. - + Calls: `sqlite3_shutdown `__""" ... -def softheaplimit(limit: int) -> int: +def softheaplimit(limit: int) -> int: """Requests SQLite try to keep memory usage below *amount* bytes and returns the previous limit. - + Calls: `sqlite3_soft_heap_limit64 `__""" ... -def sqlite3_sourceid() -> str: +def sqlite3_sourceid() -> str: """Returns the exact checkin information for the SQLite 3 source being used. - + Calls: `sqlite3_sourceid `__""" ... -def sqlitelibversion() -> str: +def sqlitelibversion() -> str: """Returns the version of the SQLite library. This value is queried at run time from the library so if you use shared libraries it will be the version in the shared library. - + Calls: `sqlite3_libversion `__""" ... -def status(op: int, reset: bool = False) -> Tuple[int, int]: +def status(op: int, reset: bool = False) -> Tuple[int, int]: """Returns current and highwater measurements. - + :param op: A `status parameter `_ :param reset: If *True* then the highwater is set to the current value :returns: A tuple of current value and highwater value - + .. seealso:: - - * :ref:`Status example ` - + + * :ref:`Status example ` + Calls: `sqlite3_status64 `__""" ... using_amalgamation: bool -def vfsnames() -> List[str]: +"""If True then `SQLite amalgamation +`__ is in +use (statically compiled into APSW). Using the amalgamation means +that SQLite shared libraries are not used and will not affect your +code.""" + +def vfsnames() -> List[str]: """Returns a list of the currently installed :ref:`vfs `. The first item in the list is the default vfs.""" ... class Backup: - def __init__(self, ) -> None: ... - def close(self, force: bool = False) -> None: ... 
+ """You create a backup instance by calling :meth:`Connection.backup`.""" + + def close(self, force: bool = False) -> None: + """Does the same thing as :meth:`~Backup.finish`. This extra api is + provided to give the same api as other APSW objects such as + :meth:`Connection.close`, :meth:`Blob.close` and + :meth:`Cursor.close`. It is safe to call this method multiple + times. + + :param force: If true then any exceptions are ignored.""" + ... + done: bool - def __enter__(self) -> Backup: ... - def __exit__(self, etype: Optional[type[BaseException]], evalue: Optional[BaseException], etraceback: Optional[TracebackType]) -> Literal[False]: ... - def finish(self) -> None: ... + """A boolean that is True if the copy completed in the last call to :meth:`~Backup.step`.""" + + def __enter__(self) -> Backup: + """You can use the backup object as a `context manager + `_ + as defined in :pep:`0343`. The :meth:`~Backup.__exit__` method ensures that backup + is :meth:`finished `.""" + ... + + def __exit__(self, etype: Optional[type[BaseException]], evalue: Optional[BaseException], etraceback: Optional[types.TracebackType]) -> Optional[bool]: + """Implements context manager in conjunction with :meth:`~Backup.__enter__` ensuring + that the copy is :meth:`finished `.""" + ... + + def finish(self) -> None: + """Completes the copy process. If all pages have been copied then the + transaction is committed on the destination database, otherwise it + is rolled back. This method must be called for your backup to take + effect. The backup object will always be finished even if there is + an exception. It is safe to call this method multiple times. + + Calls: `sqlite3_backup_finish `__""" + ... + pagecount: int + """Read only. How many pages were in the source database after the last + step. If you haven't called :meth:`~Backup.step` or the backup + object has been :meth:`finished ` then zero is + returned. + + Calls: `sqlite3_backup_pagecount `__""" + remaining: int - def step(self, npages: int = -1) -> bool: ... + """Read only. How many pages were remaining to be copied after the last + step. If you haven't called :meth:`~Backup.step` or the backup + object has been :meth:`finished ` then zero is + returned. + + Calls: `sqlite3_backup_remaining `__""" + + def step(self, npages: int = -1) -> bool: + """Copies *npages* pages from the source to destination database. The source database is locked during the copy so + using smaller values allows other access to the source database. The destination database is always locked until the + backup object is :meth:`finished `. + + :param npages: How many pages to copy. If the parameter is omitted + or negative then all remaining pages are copied. The default page + size is 1024 bytes (1kb) which can be changed before database + creation using a `pragma + `_. + + This method may throw a :exc:`BusyError` or :exc:`LockedError` if + unable to lock the source database. You can catch those and try + again. + + :returns: True if this copied the last remaining outstanding pages, else false. This is the same value as :attr:`~Backup.done` + + Calls: `sqlite3_backup_step `__""" + ... + class Blob: - def __init__(self, ) -> None: ... - def close(self, force: bool = False) -> None: ... - def __enter__(self) -> Blob: ... - def __exit__(self) -> Literal[False]: ... - def length(self) -> int: ... - def read(self, length: int = -1) -> bytes: ... - def readinto(self, buffer: Union[bytearray, array[Any], memoryview], offset: int = 0, length: int = -1) -> None: ... 
-    def reopen(self, rowid: int) -> None: ...
-    def seek(self, offset: int, whence: int = 0) -> None: ...
-    def tell(self) -> int: ...
-    def write(self, data: bytes) -> None: ...
+    """This object is created by :meth:`Connection.blobopen` and provides
+    access to a blob in the database. It behaves like a Python file.
+    At the C level it wraps a `sqlite3_blob
+    `_.
+
+    .. note::
+
+       You cannot change the size of a blob using this object. You should
+       create it with the correct size in advance either by using
+       :class:`zeroblob` or the `zeroblob()
+       `_ function.
+
+    See the :ref:`example `."""
+
+    def close(self, force: bool = False) -> None:
+        """Closes the blob. Note that even if an error occurs the blob is
+        still closed.
+
+        .. note::
+
+           In some cases errors that technically occurred in the
+           :meth:`~Blob.read` and :meth:`~Blob.write` routines may not be
+           reported until close is called. Similarly errors that occurred
+           in those methods (eg calling :meth:`~Blob.write` on a read-only
+           blob) may also be re-reported in :meth:`~Blob.close`. (This
+           behaviour is what the underlying SQLite APIs do - it is not APSW
+           doing it.)
+
+        It is okay to call :meth:`~Blob.close` multiple times.
+
+        :param force: Ignores any errors during close.
+
+        Calls: `sqlite3_blob_close `__"""
+        ...
+
+    def __enter__(self) -> Blob:
+        """You can use a blob as a `context manager
+        `_
+        as defined in :pep:`0343`. When you use *with* statement,
+        the blob is always :meth:`closed ` on exit from the block, even if an
+        exception occurred in the block.
+
+        For example::
+
+            with connection.blobopen() as blob:
+                blob.write("...")
+                res=blob.read(1024)"""
+        ...
+
+    def __exit__(self, etype: Optional[type[BaseException]], evalue: Optional[BaseException], etraceback: Optional[types.TracebackType]) -> Optional[bool]:
+        """Implements context manager in conjunction with
+        :meth:`~Blob.__enter__`. Any exception that happened in the
+        *with* block is raised after closing the blob."""
+        ...
+
+    def length(self) -> int:
+        """Returns the size of the blob in bytes.
+
+        Calls: `sqlite3_blob_bytes `__"""
+        ...
+
+    def read(self, length: int = -1) -> bytes:
+        """Reads amount of data requested, or till end of file, whichever is
+        earlier. Attempting to read beyond the end of the blob returns an
+        empty bytes in the same manner as end of file on normal file
+        objects. Negative numbers read remaining data.
+
+        Calls: `sqlite3_blob_read `__"""
+        ...
+
+    def readinto(self, buffer: Union[bytearray, array.array[Any], memoryview], offset: int = 0, length: int = -1) -> None:
+        """Reads from the blob into a buffer you have supplied. This method is
+        useful if you already have a buffer like object that data is being
+        assembled in, and avoids allocating results in :meth:`Blob.read` and
+        then copying into buffer.
+
+        :param buffer: A writable buffer like object.
+               There is a bytearray type that is very useful.
+               `arrays `__ also work.
+
+        :param offset: The position to start writing into the buffer
+               defaulting to the beginning.
+
+        :param length: How much of the blob to read. The default is the
+               remaining space left in the buffer. Note that if
+               there is more space available than blob left then you
+               will get a *ValueError* exception.
+
+        Calls: `sqlite3_blob_read `__"""
+        ...
+
+    def reopen(self, rowid: int) -> None:
+        """Change this blob object to point to a different row. It can be
+        faster than closing an existing blob and opening a new one.
+
+        Calls: `sqlite3_blob_reopen `__"""
+        ...
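
As a sketch of how the blob API above fits together (editorial example, not part of the package: the table name ``demo`` and the reserved size are illustrative; ``blobopen``, ``last_insert_rowid``, the SQL ``zeroblob()`` function, and the ``seek`` documented just below are the pieces being exercised)::

    import apsw

    con = apsw.Connection(":memory:")
    con.execute("CREATE TABLE demo(content BLOB)")
    # zeroblob reserves space up front, since a Blob cannot be grown later
    con.execute("INSERT INTO demo VALUES(zeroblob(100))")
    rowid = con.last_insert_rowid()

    blob = con.blobopen("main", "demo", "content", rowid, True)
    try:
        blob.write(b"hello world")
        blob.seek(0)                      # back to the start
        assert blob.read(5) == b"hello"
    finally:
        blob.close()
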
+ + def seek(self, offset: int, whence: int = 0) -> None: + """Changes current position to *offset* biased by *whence*. + + :param offset: New position to seek to. Can be positive or negative number. + :param whence: Use 0 if *offset* is relative to the beginning of the blob, + 1 if *offset* is relative to the current position, + and 2 if *offset* is relative to the end of the blob. + :raises ValueError: If the resulting offset is before the beginning (less than zero) or beyond the end of the blob.""" + ... + + def tell(self) -> int: + """Returns the current offset.""" + ... + + def write(self, data: bytes) -> None: + """Writes the data to the blob. + + :param data: bytes to write + + :raises TypeError: Wrong data type + + :raises ValueError: If the data would go beyond the end of the blob. + You cannot increase the size of a blob by writing beyond the end. + You need to use :class:`zeroblob` to set the desired size first when + inserting the blob. + + Calls: `sqlite3_blob_write `__""" + ... + class Connection: - def __init__(self, filename: str, flags: int = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, vfs: Optional[str] = None, statementcachesize: int = 100): ... - def autovacuum_pages(self, callable: Optional[Callable[[str, int, int, int], int]]) -> None: ... - def backup(self, databasename: str, sourceconnection: Connection, sourcedatabasename: str) -> Backup: ... - def blobopen(self, database: str, table: str, column: str, rowid: int, writeable: bool) -> Blob: ... - def changes(self) -> int: ... - def close(self, force: bool = False) -> None: ... - def collationneeded(self, callable: Optional[Callable[[Connection, str], None]]) -> None: ... - def config(self, op: int, *args: int) -> int: ... - def createaggregatefunction(self, name: str, factory: Optional[AggregateFactory], numargs: int = -1) -> None: ... - def createcollation(self, name: str, callback: Optional[Callable[[str, str], int]]) -> None: ... - def createmodule(self, name: str, datasource: Any) -> None: ... - def createscalarfunction(self, name: str, callable: Optional[ScalarProtocol], numargs: int = -1, deterministic: bool = False) -> None: ... - def cursor(self) -> Cursor: ... - def db_filename(self, name: str) -> str: ... - def db_names(self) -> List[str]: ... - def deserialize(self, name: str, contents: bytes) -> None: ... - def enableloadextension(self, enable: bool) -> None: ... - def __enter__(self) -> Connection: ... - def __exit__(self) -> Literal[False]: ... - def filecontrol(self, dbname: str, op: int, pointer: int) -> bool: ... + """This object wraps a `sqlite3 pointer + `_.""" + + authorizer: Optional[Authorizer] + """While `preparing `_ + statements, SQLite will call any defined authorizer to see if a + particular action is ok to be part of the statement. + + Typical usage would be if you are running user supplied SQL and want + to prevent harmful operations. You should also + set the :class:`statementcachesize ` to zero. + + The authorizer callback has 5 parameters: + + * An `operation code `_ + * A string (or None) dependent on the operation `(listed as 3rd) `_ + * A string (or None) dependent on the operation `(listed as 4th) `_ + * A string name of the database (or None) + * Name of the innermost trigger or view doing the access (or None) + + The authorizer callback should return one of *SQLITE_OK*, + *SQLITE_DENY* or *SQLITE_IGNORE*. + (*SQLITE_DENY* is returned if there is an error in your + Python code). + + .. 
seealso:: + + * :ref:`Example ` + * :ref:`statementcache` + + Calls: `sqlite3_set_authorizer `__""" + + def autovacuum_pages(self, callable: Optional[Callable[[str, int, int, int], int]]) -> None: + """Calls `callable` to find out how many pages to autovacuum. The callback has 4 parameters: + + * Database name: str (eg "main") + * Database pages: int (how many pages make up the database now) + * Free pages: int (how many pages could be freed) + * Page size: int (page size in bytes) + + Return how many pages should be freed. Values less than zero or more than the free pages are + treated as zero or free page count. On error zero is returned. + + READ THE NOTE IN THE SQLITE DOCUMENTATION. Calling into SQLite can result in crashes, corrupt + databases or worse. + + Calls: `sqlite3_autovacuum_pages `__""" + ... + + def backup(self, databasename: str, sourceconnection: Connection, sourcedatabasename: str) -> Backup: + """Opens a :ref:`backup object `. All data will be copied from source + database to this database. + + :param databasename: Name of the database. This will be ``main`` for + the main connection and the name you specified for `attached + `_ databases. + :param sourceconnection: The :class:`Connection` to copy a database from. + :param sourcedatabasename: Name of the database in the source (eg ``main``). + + :rtype: :class:`backup` + + .. seealso:: + + * :ref:`Backup` + + Calls: `sqlite3_backup_init `__""" + ... + + def blobopen(self, database: str, table: str, column: str, rowid: int, writeable: bool) -> Blob: + """Opens a blob for :ref:`incremental I/O `. + + :param database: Name of the database. This will be ``main`` for + the main connection and the name you specified for `attached + `_ databases. + :param table: The name of the table + :param column: The name of the column + :param rowid: The id that uniquely identifies the row. + :param writeable: If True then you can read and write the blob. If False then you can only read it. + + :rtype: :class:`Blob` + + .. seealso:: + + * :ref:`Blob I/O example ` + * `SQLite row ids `_ + + Calls: `sqlite3_blob_open `__""" + ... + + def cache_stats(self, include_entries: bool = False) -> Dict[str, int]: + """Returns information about the statement cache as dict. + + .. note:: + + Calling execute with "select a; select b; insert into c ..." will + result in 3 cache entries corresponding to each of the 3 queries + present. + + The returned dictionary has the following information. + + .. list-table:: + :header-rows: 1 + :widths: auto + + * - Key + - Explanation + * - size + - Maximum number of entries in the cache + * - evictions + - How many entries were removed (expired) to make space for a newer + entry + * - no_cache + - Queries that had can_cache parameter set to False + * - hits + - A match was found in the cache + * - misses + - No match was found in the cache, or the cache couldn't be used + * - no_vdbe + - The statement was empty (eg a comment) or SQLite took action + during parsing (eg some pragmas). These are not cached and also + included in the misses count + * - too_big + - UTF8 query size was larger than considered for caching. These are also included + in the misses count. + * - max_cacheable_bytes + - Maximum size of query (in bytes of utf8) that will be considered for caching + * - entries + - (Only present if `include_entries` is True) A list of the cache entries + + If `entries` is present, then each list entry is a dict with the following information. + + .. 
list-table::
+            :header-rows: 1
+            :widths: auto
+
+            * - Key
+              - Explanation
+            * - query
+              - Text of the query itself (first statement only)
+            * - prepare_flags
+              - Flags passed to `sqlite3_prepare_v3 `__
+                for this query
+            * - uses
+              - How many times this entry has been (re)used
+            * - has_more
+              - Boolean indicating if there was more query text than
+                the first statement"""
+        ...
+
+    def changes(self) -> int:
+        """Returns the number of database rows that were changed (or inserted
+        or deleted) by the most recently completed INSERT, UPDATE, or DELETE
+        statement.
+
+        Calls: `sqlite3_changes64 `__"""
+        ...
+
+    def close(self, force: bool = False) -> None:
+        """Closes the database. If there are any outstanding :class:`cursors
+        `, :class:`blobs ` or :class:`backups ` then
+        they are closed too. It is normally not necessary to call this
+        method as the database is automatically closed when there are no
+        more references. It is ok to call the method multiple times.
+
+        If your user defined functions or collations have direct or indirect
+        references to the Connection then it won't be automatically garbage
+        collected because of circular referencing that can't be
+        automatically broken. Calling *close* will free all those objects
+        and what they reference.
+
+        SQLite is designed to survive power failures at even the most
+        awkward moments. Consequently it doesn't matter if it is closed
+        when the process is exited, or even if the exit is graceful or
+        abrupt. In the worst case of having a transaction in progress, that
+        transaction will be rolled back by the next program to open the
+        database, reverting the database to a known good state.
+
+        If *force* is *True* then any exceptions are ignored.
+
+        Calls: `sqlite3_close `__"""
+        ...
+
+    def collationneeded(self, callable: Optional[Callable[[Connection, str], None]]) -> None:
+        """*callable* will be called if a statement requires a `collation
+        `_ that hasn't been
+        registered. Your callable will be passed two parameters. The first
+        is the connection object. The second is the name of the
+        collation. If you have the collation code available then call
+        :meth:`Connection.createcollation`.
+
+        This is useful for creating collations on demand. For example you
+        may include the `locale `_ in
+        the collation name, but since there are thousands of locales in
+        popular use it would not be useful to :meth:`preregister
+        ` them all. Using
+        :meth:`~Connection.collationneeded` tells you when you need to
+        register them.
+
+        .. seealso::
+
+          * :meth:`~Connection.createcollation`
+
+        Calls: `sqlite3_collation_needed `__"""
+        ...
+
+    def config(self, op: int, *args: int) -> int:
+        """:param op: A `configuration operation
+          `__
+        :param args: Zero or more arguments as appropriate for *op*
+
+        Only options that take an int and return one are implemented.
+
+        Calls: `sqlite3_db_config `__"""
+        ...
+
+    def createaggregatefunction(self, name: str, factory: Optional[AggregateFactory], numargs: int = -1) -> None:
+        """Registers an aggregate function. Aggregate functions operate on all
+        the relevant rows such as counting how many there are.
+
+        :param name: The string name of the function. It should be less than 255 characters
+        :param factory: The function that will be called. Use None to delete the function.
+        :param numargs: How many arguments the function takes, with -1 meaning any number
+
+        When a query starts, the *factory* will be called and must return a tuple of 3 items:
+
+        a context object
+           This can be of any type
+
+        a step function
+           This function is called once for each row. The first parameter
+           will be the context object and the remaining parameters will be
+           from the SQL statement. Any value returned will be ignored.
+
+        a final function
+           This function is called at the very end with the context object
+           as a parameter. The value returned is set as the return for
+           the function. The final function is always called even if an
+           exception was raised by the step function. This allows you to
+           ensure any resources are cleaned up.
+
+        .. note::
+
+          You can register the same named function but with different
+          callables and *numargs*. See
+          :meth:`~Connection.createscalarfunction` for an example.
+
+        .. seealso::
+
+          * :ref:`Example `
+          * :meth:`~Connection.createscalarfunction`
+
+        Calls: `sqlite3_create_function_v2 `__"""
+        ...
+
+    def createcollation(self, name: str, callback: Optional[Callable[[str, str], int]]) -> None:
+        """You can control how SQLite sorts (termed `collation
+        `_) when giving the
+        ``COLLATE`` term to a `SELECT
+        `_. For example your
+        collation could take into account locale or do numeric sorting.
+
+        The *callback* will be called with two items. It should return -1
+        if the first is less than the second, 0 if they are equal, and 1 if
+        first is greater::
+
+          def mycollation(one, two):
+              if one < two:
+                  return -1
+              if one == two:
+                  return 0
+              if one > two:
+                  return 1
+
+        Passing None as the callback will unregister the collation.
+
+        .. seealso::
+
+          * :ref:`Example `
+
+        Calls: `sqlite3_create_collation_v2 `__"""
+        ...
+
+    def createmodule(self, name: str, datasource: Any) -> None:
+        """Registers a virtual table. See :ref:`virtualtables` for details.
+
+        .. seealso::
+
+          * :ref:`Example `
+
+        Calls: `sqlite3_create_module_v2 `__"""
+        ...
+
+    def createscalarfunction(self, name: str, callable: Optional[ScalarProtocol], numargs: int = -1, deterministic: bool = False) -> None:
+        """Registers a scalar function. Scalar functions operate on one set of parameters once.
+
+        :param name: The string name of the function. It should be less than 255 characters
+        :param callable: The function that will be called. Use None to unregister.
+        :param numargs: How many arguments the function takes, with -1 meaning any number
+        :param deterministic: When True this means the function always
+                 returns the same result for the same input arguments.
+                 SQLite's query planner can perform additional optimisations
+                 for deterministic functions. For example a random()
+                 function is not deterministic while one that returns the
+                 length of a string is.
+
+        .. note::
+
+          You can register the same named function but with different
+          *callable* and *numargs*. For example::
+
+            connection.createscalarfunction("toip", ipv4convert, 4)
+            connection.createscalarfunction("toip", ipv6convert, 16)
+            connection.createscalarfunction("toip", strconvert, -1)
+
+          The one with the correct *numargs* will be called and only if that
+          doesn't exist then the one with negative *numargs* will be called.
+
+        .. seealso::
+
+          * :ref:`Example `
+          * :meth:`~Connection.createaggregatefunction`
+
+        Calls: `sqlite3_create_function_v2 `__"""
+        ...
+
+    def cursor(self) -> Cursor:
+        """Creates a new :class:`Cursor` object on this database.
+
+        :rtype: :class:`Cursor`"""
+        ...
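
To make the scalar and aggregate contracts above concrete, a minimal editorial sketch (the names ``reverse`` and ``sumlen`` are illustrative, not part of the package)::

    import apsw

    con = apsw.Connection(":memory:")

    # Scalar function: exactly one argument, same output for same input
    con.createscalarfunction("reverse", lambda s: s[::-1], 1, deterministic=True)

    # Aggregate factory returning (context, step function, final function)
    def sumlen():
        def step(ctx, value):
            ctx[0] += len(value)      # context arrives as the first parameter
        def final(ctx):
            return ctx[0]             # becomes the SQL function's result
        return ([0], step, final)

    con.createaggregatefunction("sumlen", sumlen)

    con.execute("CREATE TABLE t(x)")
    con.executemany("INSERT INTO t VALUES(?)", [("one",), ("two",), ("three",)])
    for (rev,) in con.execute("SELECT reverse(x) FROM t"):
        print(rev)
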
+ + cursor_factory: Callable[[Connection], Any] + """Defaults to :class:`Cursor` + + Called with a :class:`Connection` as the only parameter when a cursor + is needed such as by the :meth:`cursor` method, or + :meth:`Connection.execute`. + + Note that whatever is returned doesn't have to be an actual + :class:`Cursor` instance, and just needs to have the methods present + that are actually called. These are likely to be `execute`, + `executemany`, `close` etc.""" + + def db_filename(self, name: str) -> str: + """Returns the full filename of the named (attached) database. The + main database is named "main". + + Calls: `sqlite3_db_filename `__""" + ... + + def db_names(self) -> List[str]: + """Returns the list of database names. For example the first database + is named 'main', the next 'temp', and the rest with the name provided + in `ATTACH `__ + + Calls: `sqlite3_db_name `__""" + ... + + def deserialize(self, name: str, contents: bytes) -> None: + """Replaces the named database with an in-memory copy of *contents*. + *name* is **"main"** for the main database, **"temp"** for the + temporary database etc. + + The resulting database is in-memory, read-write, and the memory is + owned, resized, and freed by SQLite. + + .. seealso:: + + * :meth:`Connection.serialize` + + Calls: `sqlite3_deserialize `__""" + ... + + def enableloadextension(self, enable: bool) -> None: + """Enables/disables `extension loading + `_ + which is disabled by default. + + :param enable: If True then extension loading is enabled, else it is disabled. + + Calls: `sqlite3_enable_load_extension `__ + + .. seealso:: + + * :meth:`~Connection.loadextension`""" + ... + + def __enter__(self) -> Connection: + """You can use the database as a `context manager + `_ + as defined in :pep:`0343`. When you use *with* a transaction is + started. If the block finishes with an exception then the + transaction is rolled back, otherwise it is committed. For example:: + + with connection: + connection.execute("....") + with connection: + # nested is supported + call_function(connection) + connection.execute("...") + with connection as db: + # You can also use 'as' + call_function2(db) + db.execute("...") + + Behind the scenes the `savepoint + `_ functionality introduced in + SQLite 3.6.8 is used to provide nested transactions.""" + ... + + exectrace: Optional[ExecTracer] + """Called with the cursor, statement and bindings for + each :meth:`~Cursor.execute` or :meth:`~Cursor.executemany` on this + Connection, unless the :class:`Cursor` installed its own + tracer. Your execution tracer can also abort execution of a + statement. + + If *callable* is *None* then any existing execution tracer is + removed. + + .. seealso:: + + * :ref:`tracing` + * :ref:`rowtracer` + * :attr:`Cursor.exectrace`""" + + def execute(self, statements: str, bindings: Optional[Bindings] = None, *, can_cache: bool = True, prepare_flags: int = 0) -> Cursor: + """Executes the statements using the supplied bindings. Execution + returns when the first row is available or all statements have + completed. (A cursor is automatically obtained). + + See :meth:`Cursor.execute` for more details.""" + ... + + def executemany(self, statements: str, sequenceofbindings:Sequence[Bindings], *, can_cache: bool = True, prepare_flags: int = 0) -> Cursor: + """This method is for when you want to execute the same statements over a + sequence of bindings, such as inserting into a database. (A cursor is + automatically obtained). + + See :meth:`Cursor.executemany` for more details.""" + ... 
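
A short sketch of the transaction behaviour of ``__enter__``/``__exit__`` combined with the ``Connection.execute``/``executemany`` convenience methods documented above (editorial example; the file and table names are illustrative)::

    import apsw

    con = apsw.Connection("example.db")
    con.execute("CREATE TABLE IF NOT EXISTS point(x, y)")

    # The with block runs in a (savepoint based) transaction: committed
    # if the block completes, rolled back if it raises
    with con:
        con.executemany("INSERT INTO point VALUES(?, ?)", [(1, 2), (3, 4)])

    for x, y in con.execute("SELECT x, y FROM point"):
        print(x, y)
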
+ + def __exit__(self, etype: Optional[type[BaseException]], evalue: Optional[BaseException], etraceback: Optional[types.TracebackType]) -> Optional[bool]: + """Implements context manager in conjunction with + :meth:`~Connection.__enter__`. Any exception that happened in the + *with* block is raised after committing or rolling back the + savepoint.""" + ... + + def filecontrol(self, dbname: str, op: int, pointer: int) -> bool: + """Calls the :meth:`~VFSFile.xFileControl` method on the :ref:`VFS` + implementing :class:`file access ` for the database. + + :param dbname: The name of the database to affect (eg "main", "temp", attached name) + :param op: A `numeric code + `_ with values less + than 100 reserved for SQLite internal use. + :param pointer: A number which is treated as a ``void pointer`` at the C level. + + :returns: True or False indicating if the VFS understood the op. + + If you want data returned back then the *pointer* needs to point to + something mutable. Here is an example using `ctypes + `_ of + passing a Python dictionary to :meth:`~VFSFile.xFileControl` which + can then modify the dictionary to set return values:: + + obj={"foo": 1, 2: 3} # object we want to pass + objwrap=ctypes.py_object(obj) # objwrap must live before and after the call else + # it gets garbage collected + connection.filecontrol( + "main", # which db + 123, # our op code + ctypes.addressof(objwrap)) # get pointer + + The :meth:`~VFSFile.xFileControl` method then looks like this:: + + def xFileControl(self, op, pointer): + if op==123: # our op code + obj=ctypes.py_object.from_address(pointer).value + # play with obj - you can use id() to verify it is the same + print(obj["foo"]) + obj["result"]="it worked" + return True + else: + # pass to parent/superclass + return super(MyFile, self).xFileControl(op, pointer) + + This is how you set the chunk size by which the database grows. Do + not combine it into one line as the c_int would be garbage collected + before the filecontrol call is made:: + + chunksize=ctypes.c_int(32768) + connection.filecontrol("main", apsw.SQLITE_FCNTL_CHUNK_SIZE, ctypes.addressof(chunksize)) + + Calls: `sqlite3_file_control `__""" + ... + filename: str - def getautocommit(self) -> bool: ... - def getexectrace(self) -> Optional[ExecTracer]: ... - def getrowtrace(self) -> Optional[RowTracer]: ... - def interrupt(self) -> None: ... - def last_insert_rowid(self) -> int: ... - def limit(self, id: int, newval: int = -1) -> int: ... - def loadextension(self, filename: str, entrypoint: Optional[str] = None) -> None: ... + """The filename of the database. + + Calls: `sqlite3_db_filename `__""" + + def getautocommit(self) -> bool: + """Returns if the Connection is in auto commit mode (ie not in a transaction). + + Calls: `sqlite3_get_autocommit `__""" + ... + + def getexectrace(self) -> Optional[ExecTracer]: + """Returns the currently installed :attr:`execution tracer + `""" + ... + + def getrowtrace(self) -> Optional[RowTracer]: + """Returns the currently installed :attr:`row tracer + `""" + ... + + in_transaction: bool + """True if currently in a transaction, else False + + Calls: `sqlite3_get_autocommit `__""" + + def __init__(self, filename: str, flags: int = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, vfs: Optional[str] = None, statementcachesize: int = 100): + """Opens the named database. You can use ``:memory:`` to get a private temporary + in-memory database that is not shared with any other connections. 
+ + :param flags: One or more of the `open flags `_ orred together + :param vfs: The name of the `vfs `_ to use. If *None* then the default + vfs will be used. + + :param statementcachesize: Use zero to disable the statement cache, + or a number larger than the total distinct SQL statements you + execute frequently. + + Calls: `sqlite3_open_v2 `__ + + .. seealso:: + + * :attr:`apsw.connection_hooks` + * :ref:`statementcache` + * :ref:`vfs`""" + ... + + def interrupt(self) -> None: + """Causes any pending operations on the database to abort at the + earliest opportunity. You can call this from any thread. For + example you may have a long running query when the user presses the + stop button in your user interface. :exc:`InterruptError` + will be raised in the query that got interrupted. + + Calls: `sqlite3_interrupt `__""" + ... + + def last_insert_rowid(self) -> int: + """Returns the integer key of the most recent insert in the database. + + Calls: `sqlite3_last_insert_rowid `__""" + ... + + def limit(self, id: int, newval: int = -1) -> int: + """If called with one parameter then the current limit for that *id* is + returned. If called with two then the limit is set to *newval*. + + + :param id: One of the `runtime limit ids `_ + :param newval: The new limit. This is a 32 bit signed integer even on 64 bit platforms. + + :returns: The limit in place on entry to the call. + + Calls: `sqlite3_limit `__ + + .. seealso:: + + * :ref:`Example `""" + ... + + def loadextension(self, filename: str, entrypoint: Optional[str] = None) -> None: + """Loads *filename* as an `extension `_ + + :param filename: The file to load. This must be Unicode or Unicode compatible + + :param entrypoint: The initialization method to call. If this + parameter is not supplied then the SQLite default of + ``sqlite3_extension_init`` is used. + + :raises ExtensionLoadingError: If the extension could not be + loaded. The exception string includes more details. + + Calls: `sqlite3_load_extension `__ + + .. seealso:: + + * :meth:`~Connection.enableloadextension`""" + ... + open_flags: int + """The integer flags used to open the database.""" + open_vfs: str - def overloadfunction(self, name: str, nargs: int) -> None: ... - def readonly(self, name: str) -> bool: ... - def serialize(self, name: str) -> bytes: ... - def set_last_insert_rowid(self, rowid: int) -> None: ... - def setauthorizer(self, callable: Optional[Callable[[int, Optional[str], Optional[str], Optional[str], Optional[str]], int]]) -> None: ... - def setbusyhandler(self, callable: Optional[Callable[[int], bool]]) -> None: ... - def setbusytimeout(self, milliseconds: int) -> None: ... - def setcommithook(self, callable: Optional[Callable[[], None]]) -> None: ... - def setexectrace(self, callable: Optional[ExecTracer]) -> None: ... - def setprofile(self, callable: Optional[Callable[[str, int], None]]) -> None: ... - def setprogresshandler(self, callable: Optional[Callable[[], bool]], nsteps: int = 20) -> None: ... - def setrollbackhook(self, callable: Optional[Callable[[], None]]) -> None: ... - def setrowtrace(self, callable: Optional[RowTracer]) -> None: ... - def setupdatehook(self, callable: Optional[Callable[[int, str, str, int], None]]) -> None: ... - def setwalhook(self, callable: Optional[Callable[[Connection, str, int], int]]) -> None: ... - def sqlite3pointer(self) -> int: ... - def status(self, op: int, reset: bool = False) -> Tuple[int, int]: ... - def totalchanges(self) -> int: ... - def txn_state(self, schema: Optional[str] = None) -> int: ... 
- def wal_autocheckpoint(self, n: int) -> None: ... - def wal_checkpoint(self, dbname: Optional[str] = None, mode: int = SQLITE_CHECKPOINT_PASSIVE) -> Tuple[int, int]: ... + """The string name of the vfs used to open the database.""" + + def overloadfunction(self, name: str, nargs: int) -> None: + """Registers a placeholder function so that a virtual table can provide an implementation via + :meth:`VTTable.FindFunction`. + + :param name: Function name + :param nargs: How many arguments the function takes + + Due to cvstrac 3507 underlying errors will not be returned. + + Calls: `sqlite3_overload_function `__""" + ... + + def readonly(self, name: str) -> bool: + """True or False if the named (attached) database was opened readonly or file + permissions don't allow writing. The main database is named "main". + + An exception is raised if the database doesn't exist. + + Calls: `sqlite3_db_readonly `__""" + ... + + rowtrace: Optional[RowTracer] + """Called with the cursor and row being returned for + :class:`cursors ` associated with this Connection, unless + the Cursor installed its own tracer. You can change the data that + is returned or cause the row to be skipped altogether. + + If *callable* is *None* then any existing row tracer is + removed. + + .. seealso:: + + * :ref:`tracing` + * :ref:`rowtracer` + * :attr:`Cursor.exectrace`""" + + def serialize(self, name: str) -> bytes: + """Returns a memory copy of the database. *name* is **"main"** for the + main database, **"temp"** for the temporary database etc. + + The memory copy is the same as if the database was backed up to + disk. + + If the database name doesn't exist or is empty, then None is + returned, not an exception (this is SQLite's behaviour). + + .. seealso:: + + * :meth:`Connection.deserialize` + + Calls: `sqlite3_serialize `__""" + ... + + def set_last_insert_rowid(self, rowid: int) -> None: + """Sets the value calls to :meth:`last_insert_rowid` will return. + + Calls: `sqlite3_set_last_insert_rowid `__""" + ... + + def setauthorizer(self, callable: Optional[Authorizer]) -> None: + """Sets the :attr:`authorizer`""" + ... + + def setbusyhandler(self, callable: Optional[Callable[[int], bool]]) -> None: + """Sets the busy handler to callable. callable will be called with one + integer argument which is the number of prior calls to the busy + callback for the same lock. If the busy callback returns False, + then SQLite returns *SQLITE_BUSY* to the calling code. If + the callback returns True, then SQLite tries to open the table + again and the cycle repeats. + + If you previously called :meth:`~Connection.setbusytimeout` then + calling this overrides that. + + Passing None unregisters the existing handler. + + .. seealso:: + + * :meth:`Connection.setbusytimeout` + * :ref:`Busy handling ` + + Calls: `sqlite3_busy_handler `__""" + ... + + def setbusytimeout(self, milliseconds: int) -> None: + """If the database is locked such as when another connection is making + changes, SQLite will keep retrying. This sets the maximum amount of + time SQLite will keep retrying before giving up. If the database is + still busy then :class:`apsw.BusyError` will be returned. + + :param milliseconds: Maximum thousandths of a second to wait. + + If you previously called :meth:`~Connection.setbusyhandler` then + calling this overrides that. + + .. seealso:: + + * :meth:`Connection.setbusyhandler` + * :ref:`Busy handling ` + + Calls: `sqlite3_busy_timeout `__""" + ... 
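+
+ A short sketch contrasting the two busy handling approaches above
+ (the timeout value and the retry limit are illustrative only)::
+
+     connection.setbusytimeout(500)   # let SQLite retry for up to 500ms
+
+     # or take over the policy yourself; returning False means give up
+     # and SQLITE_BUSY / BusyError is returned to the caller
+     def busy(priorcalls):
+         return priorcalls < 10
+
+     connection.setbusyhandler(busy)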
+
+ def setcommithook(self, callable: Optional[CommitHook]) -> None:
+ """*callable* will be called just before a commit. It should return
+ False for the commit to go ahead and True for it to be turned
+ into a rollback. In the case of an exception in your callable, a
+ True (ie rollback) value is returned. Pass None to unregister
+ the existing hook.
+
+ .. seealso::
+
+ * :ref:`Example `
+
+ Calls: `sqlite3_commit_hook `__"""
+ ...
+
+ def setexectrace(self, callable: Optional[ExecTracer]) -> None:
+ """Method to set :attr:`Connection.exectrace`"""
+ ...
+
+ def setprofile(self, callable: Optional[Callable[[str, int], None]]) -> None:
+ """Sets a callable which is invoked at the end of execution of each
+ statement and passed the statement string and how long it took to
+ execute. (The execution time is in nanoseconds.) Note that it is
+ called only on completion. If for example you do a ``SELECT`` and
+ only read the first result, then you won't reach the end of the
+ statement.
+
+ Calls: `sqlite3_profile `__"""
+ ...
+
+ def setprogresshandler(self, callable: Optional[Callable[[], bool]], nsteps: int = 20) -> None:
+ """Sets a callable which is invoked every *nsteps* SQLite
+ instructions. The callable should return True to abort
+ or False to continue. (If there is an error in your Python *callable*
+ then True/abort will be returned).
+
+ .. seealso::
+
+ * :ref:`Example `
+
+ Calls: `sqlite3_progress_handler `__"""
+ ...
+
+ def setrollbackhook(self, callable: Optional[Callable[[], None]]) -> None:
+ """Sets a callable which is invoked during a rollback. If *callable*
+ is *None* then any existing rollback hook is unregistered.
+
+ The *callable* is called with no parameters and the return value is ignored.
+
+ Calls: `sqlite3_rollback_hook `__"""
+ ...
+
+ def setrowtrace(self, callable: Optional[RowTracer]) -> None:
+ """Method to set :attr:`Connection.rowtrace`"""
+ ...
+
+ def setupdatehook(self, callable: Optional[Callable[[int, str, str, int], None]]) -> None:
+ """Calls *callable* whenever a row is updated, deleted or inserted. If
+ *callable* is *None* then any existing update hook is
+ unregistered. The update hook cannot make changes to the database while
+ the query is still executing, but can record them for later use or
+ apply them in a different connection.
+
+ The update hook is called with 4 parameters:
+
+ type (int)
+ *SQLITE_INSERT*, *SQLITE_DELETE* or *SQLITE_UPDATE*
+ database name (string)
+ This is ``main`` for the database or the name specified in
+ `ATTACH `_
+ table name (string)
+ The table on which the update happened
+ rowid (64 bit integer)
+ The affected row
+
+ .. seealso::
+
+ * :ref:`Example `
+
+ Calls: `sqlite3_update_hook `__"""
+ ...
+
+ def setwalhook(self, callable: Optional[Callable[[Connection, str, int], int]]) -> None:
+ """*callable* will be called just after data is committed in :ref:`wal`
+ mode. It should return *SQLITE_OK* or an error code. The
+ callback is called with 3 parameters:
+
+ * The Connection
+ * The database name (eg "main" or the name of an attached database)
+ * The number of pages in the wal log
+
+ You can pass in None in order to unregister an existing hook.
+
+ Calls: `sqlite3_wal_hook `__"""
+ ...
+
+ def sqlite3pointer(self) -> int:
+ """Returns the underlying `sqlite3 *
+ `_ for the connection. This
+ method is useful if there are other C level libraries in the same
+ process and you want them to use the APSW connection handle.
The value
+ is returned as a number using `PyLong_FromVoidPtr
+ `__
+ under the hood. You should also ensure that you increment the
+ reference count on the :class:`Connection` for as long as the other
+ libraries are using the pointer. It is also a very good idea to call
+ :meth:`sqlitelibversion` and ensure it is the same as the other
+ libraries."""
+ ...
+
+ def status(self, op: int, reset: bool = False) -> Tuple[int, int]:
+ """Returns current and highwater measurements for the database.
+
+ :param op: A `status parameter `_
+ :param reset: If *True* then the highwater is set to the current value
+ :returns: A tuple of current value and highwater value
+
+ .. seealso::
+
+ The :func:`status` example which works in exactly the same way.
+
+ * :ref:`Status example `
+
+ Calls: `sqlite3_db_status `__"""
+ ...
+
+ def totalchanges(self) -> int:
+ """Returns the total number of database rows that have been modified,
+ inserted, or deleted since the database connection was opened.
+
+ Calls: `sqlite3_total_changes64 `__"""
+ ...
+
+ def txn_state(self, schema: Optional[str] = None) -> int:
+ """Returns the current transaction state of the database, or of a specific schema
+ if provided. ValueError is raised if *schema* is neither None nor a valid schema name.
+ :attr:`apsw.mapping_txn_state` contains the names and values returned.
+
+ Calls: `sqlite3_txn_state `__"""
+ ...
+
+ def wal_autocheckpoint(self, n: int) -> None:
+ """Sets how often the :ref:`wal` checkpointing is run.
+
+ :param n: A number representing the checkpointing interval or
+ zero/negative to disable auto checkpointing.
+
+ Calls: `sqlite3_wal_autocheckpoint `__"""
+ ...
+
+ def wal_checkpoint(self, dbname: Optional[str] = None, mode: int = SQLITE_CHECKPOINT_PASSIVE) -> Tuple[int, int]:
+ """Does a WAL checkpoint. Has no effect if the database(s) are not in WAL mode.
+
+ :param dbname: The name of the database or all databases if None
+
+ :param mode: One of the `checkpoint modes `__.
+
+ :return: A tuple of the size of the WAL log in frames and the
+ number of frames checkpointed as described in the
+ `documentation
+ `__.
+
+ Calls: `sqlite3_wal_checkpoint_v2 `__"""
+ ...
+
class Cursor:
- def __init__(self, ) -> None: ...
- def close(self, force: bool = False) -> None: ...
+ """You obtain cursors by calling :meth:`Connection.cursor`."""
+
+ def close(self, force: bool = False) -> None:
+ """It is very unlikely you will need to call this method. It exists
+ because older versions of SQLite required all Connection/Cursor
+ activity to be confined to the same thread. That is no longer the
+ case. Cursors are automatically garbage collected and when there
+ are none left will allow the connection to be garbage collected if
+ it has no other references.
+
+ A cursor is open if there are remaining statements to execute (if
+ your query included multiple statements), or if you called
+ :meth:`~Cursor.executemany` and not all of the *sequenceofbindings*
+ have been used yet.
+
+ :param force: If False then you will get exceptions if there is
+ remaining work to be done in the Cursor such as more statements to
+ execute, more data from the executemany binding sequence etc. If
+ force is True then all remaining work and state information will be
+ silently discarded."""
+ ...
+
+ connection: Connection
+ """:class:`Connection` this cursor is using"""
+
description: Tuple[Tuple[str, str, None, None, None, None, None], ...]
- def execute(self, statements: str, bindings: Optional[Bindings] = None) -> Cursor: ...
- def executemany(self, statements: str, sequenceofbindings: Sequence[Bindings]) -> Cursor: ...
- def fetchall(self) -> list[Tuple[SQLiteValue, ...]]: ...
- def fetchone(self) -> Optional[Any]: ...
- def getconnection(self) -> Connection: ...
- def getdescription(self) -> Tuple[Tuple[str, str], ...]: ...
- def getexectrace(self) -> Optional[ExecTracer]: ...
- def getrowtrace(self) -> Optional[RowTracer]: ...
- def __iter__(self: Cursor) -> Cursor: ...
- def __next__(self: Cursor) -> Any: ...
- def setexectrace(self, callable: Optional[ExecTracer]) -> None: ...
- def setrowtrace(self, callable: Optional[RowTracer]) -> None: ...
+ """Based on the `DB-API cursor property
+ `__, this returns the
+ same as :meth:`getdescription` but with 5 Nones appended. See
+ also :issue:`131`."""
+
+ description_full: Tuple[Tuple[str, str, str, str, str], ...]
+ """Only present if SQLITE_ENABLE_COLUMN_METADATA was defined at
+ compile time.
+
+ Returns all information about the query result columns. In
+ addition to the name and declared type, you also get the database
+ name, table name, and origin name.
+
+ Calls:
+ * `sqlite3_column_name `__
+ * `sqlite3_column_decltype `__
+ * `sqlite3_column_database_name `__
+ * `sqlite3_column_table_name `__
+ * `sqlite3_column_origin_name `__"""
+
+ exectrace: Optional[ExecTracer]
+ """Called with the cursor, statement and bindings for
+ each :meth:`~Cursor.execute` or :meth:`~Cursor.executemany` on this
+ cursor.
+
+ If *callable* is *None* then any existing execution tracer is
+ unregistered.
+
+ .. seealso::
+
+ * :ref:`tracing`
+ * :ref:`executiontracer`
+ * :attr:`Connection.exectrace`"""
+
+ def execute(self, statements: str, bindings: Optional[Bindings] = None, *, can_cache: bool = True, prepare_flags: int = 0) -> Cursor:
+ """Executes the statements using the supplied bindings. Execution
+ returns when the first row is available or all statements have
+ completed.
+
+ :param statements: One or more SQL statements such as ``select *
+ from books`` or ``begin; insert into books ...; select
+ last_insert_rowid(); end``.
+ :param bindings: If supplied should either be a sequence or a dictionary. Each item must be one of the :ref:`supported types `
+ :param can_cache: If False then the statement cache will not be used to find an already prepared query, nor will it be
+ placed in the cache after execution
+ :param prepare_flags: `flags `__ passed to
+ `sqlite3_prepare_v3 `__
+
+ If you use numbered bindings in the query then supply a sequence.
+ Any sequence will work including lists and iterators. For
+ example::
+
+     cursor.execute("insert into books values(?,?)", ("title", "number"))
+
+ .. note::
+
+ A common gotcha is wanting to insert a single string but not
+ putting it in a tuple::
+
+     cursor.execute("insert into books values(?)", "a title")
+
+ The string is a sequence of 7 characters and so it will look
+ like you are supplying 7 bindings when only one is needed. Use
+ a one item tuple with a trailing comma like this::
+
+     cursor.execute("insert into books values(?)", ("a title",) )
+
+ If you used names in the statement then supply a dictionary as the
+ binding. It is ok to be missing entries from the dictionary -
+ None/null will be used. For example::
+
+     cursor.execute("insert into books values(:title, :isbn, :rating)",
+                    {"title": "book title", "isbn": 908908908})
+
+ The return is the cursor object itself which is also an iterator.
This allows you to write:: + + for row in cursor.execute("select * from books"): + print(row) + + :raises TypeError: The bindings supplied were neither a dict nor a sequence + :raises BindingsError: You supplied too many or too few bindings for the statements + :raises IncompleteExecutionError: There are remaining unexecuted queries from your last execute + + Calls: + * `sqlite3_prepare_v3 `__ + * `sqlite3_step `__ + * `sqlite3_bind_int64 `__ + * `sqlite3_bind_null `__ + * `sqlite3_bind_text `__ + * `sqlite3_bind_double `__ + * `sqlite3_bind_blob `__ + * `sqlite3_bind_zeroblob `__""" + ... + + def executemany(self, statements: str, sequenceofbindings: Sequence[Bindings], *, can_cache: bool = True, prepare_flags: int = 0) -> Cursor: + """This method is for when you want to execute the same statements over + a sequence of bindings. Conceptually it does this:: + + for binding in sequenceofbindings: + cursor.execute(statements, binding) + + Example:: + + rows=( (1, 7), + (2, 23), + (4, 92), + (12, 12) ) + + cursor.executemany("insert into nums values(?,?)", rows) + + The return is the cursor itself which acts as an iterator. Your + statements can return data. See :meth:`~Cursor.execute` for more + information.""" + ... + + expanded_sql: str + """The SQL text with bound parameters expanded. For example:: + + execute("select ?, ?", (3, "three")) + + would return:: + + select 3, 'three' + + Note that while SQLite supports nulls in strings, their implementation + of sqlite3_expanded_sql stops at the first null. + + Calls: `sqlite3_expanded_sql `__""" + + def fetchall(self) -> list[Tuple[SQLiteValue, ...]]: + """Returns all remaining result rows as a list. This method is defined + in DBAPI. It is a longer way of doing ``list(cursor)``.""" + ... + + def fetchone(self) -> Optional[Any]: + """Returns the next row of data or None if there are no more rows.""" + ... + + def getconnection(self) -> Connection: + """Returns the :attr:`connection` this cursor is using""" + ... + + def getdescription(self) -> Tuple[Tuple[str, str], ...]: + """If you are trying to get information about a table or view, + then `pragma table_info `__ + is better. + + Returns a tuple describing each column in the result row. The + return is identical for every row of the results. You can only + call this method once you have started executing a statement and + before you have finished:: + + # This will error + cursor.getdescription() + + for row in cursor.execute("select ....."): + # this works + print (cursor.getdescription()) + print (row) + + The information about each column is a tuple of ``(column_name, + declared_column_type)``. The type is what was declared in the + ``CREATE TABLE`` statement - the value returned in the row will be + whatever type you put in for that row and column. (This is known + as `manifest typing `_ + which is also the way that Python works. The variable ``a`` could + contain an integer, and then you could put a string in it. Other + static languages such as C or other SQL databases only let you put + one type in - eg ``a`` could only contain an integer or a string, + but never both.) 
+ + Example:: + + cursor.execute("create table books(title string, isbn number, wibbly wobbly zebra)") + cursor.execute("insert into books values(?,?,?)", (97, "fjfjfj", 3.7)) + cursor.execute("insert into books values(?,?,?)", ("fjfjfj", 3.7, 97)) + + for row in cursor.execute("select * from books"): + print (cursor.getdescription()) + print (row) + + Output:: + + # row 0 - description + (('title', 'string'), ('isbn', 'number'), ('wibbly', 'wobbly zebra')) + # row 0 - values + (97, 'fjfjfj', 3.7) + # row 1 - description + (('title', 'string'), ('isbn', 'number'), ('wibbly', 'wobbly zebra')) + # row 1 - values + ('fjfjfj', 3.7, 97) + + Calls: + * `sqlite3_column_name `__ + * `sqlite3_column_decltype `__""" + ... + + def getexectrace(self) -> Optional[ExecTracer]: + """Returns the currently installed :attr:`execution tracer + ` + + .. seealso:: + + * :ref:`tracing`""" + ... + + def getrowtrace(self) -> Optional[RowTracer]: + """Returns the currently installed (via :meth:`~Cursor.setrowtrace`) + row tracer. + + .. seealso:: + + * :ref:`tracing`""" + ... + + is_explain: int + """Returns 0 if executing a normal query, 1 if it is an EXPLAIN query, + and 2 if an EXPLAIN QUERY PLAN query. + + Calls: `sqlite3_stmt_isexplain `__""" + + is_readonly: bool + """Returns True if the current query does not change the database. + + Note that called functions, virtual tables etc could make changes though. + + Calls: `sqlite3_stmt_readonly `__""" + + def __iter__(self: Cursor) -> Cursor: + """Cursors are iterators""" + ... + + def __next__(self: Cursor) -> Any: + """Cursors are iterators""" + ... + + rowtrace: Optional[RowTracer] + """Called with cursor and row being returned. You can + change the data that is returned or cause the row to be skipped + altogether. + + If *callable* is *None* then any existing row tracer is + unregistered. + + .. seealso:: + + * :ref:`tracing` + * :ref:`rowtracer` + * :attr:`Connection.rowtrace`""" + + def setexectrace(self, callable: Optional[ExecTracer]) -> None: + """Sets the :attr:`execution tracer `""" + ... + + def setrowtrace(self, callable: Optional[RowTracer]) -> None: + """Sets the :attr:`row tracer `""" + ... + class URIFilename: - def __init__(self, ) -> None: ... - def filename(self) -> str: ... - def uri_boolean(self, name: str, default: bool) -> bool: ... - def uri_int(self, name: str, default: int) -> int: ... - def uri_parameter(self, name: str) -> Optional[str]: ... + """SQLite uses a convoluted method of storing `uri parameters + `__ after the filename binding the + C filename representation and parameters together. This class + encapsulates that binding. The :ref:`example ` shows + usage of this class. + + Your :meth:`VFS.xOpen` method will generally be passed one of + these instead of a string as the filename if the URI flag was used + or the main database flag is set. + + You can safely pass it on to the :class:`VFSFile` constructor + which knows how to get the name back out.""" + + def filename(self) -> str: + """Returns the filename.""" + ... + + def uri_boolean(self, name: str, default: bool) -> bool: + """Returns the boolean value for parameter `name` or `default` if not + present. + + Calls: `sqlite3_uri_boolean `__""" + ... + + def uri_int(self, name: str, default: int) -> int: + """Returns the integer value for parameter `name` or `default` if not + present. + + Calls: `sqlite3_uri_int64 `__""" + ... + + def uri_parameter(self, name: str) -> Optional[str]: + """Returns the value of parameter `name` or None. 
+ + Calls: `sqlite3_uri_parameter `__""" + ... + class VFSFile: - def __init__(self, vfs: str, filename: Union[str,URIFilename], flags: List[int]): ... - def excepthook(self, etype: type[BaseException], evalue: BaseException, etraceback: Optional[TracebackType]) ->None: ... - def xCheckReservedLock(self) -> bool: ... - def xClose(self) -> None: ... - def xDeviceCharacteristics(self) -> int: ... - def xFileControl(self, op: int, ptr: int) -> bool: ... - def xFileSize(self) -> int: ... - def xLock(self, level: int) -> None: ... - def xRead(self, amount: int, offset: int) -> bytes: ... - def xSectorSize(self) -> int: ... - def xSync(self, flags: int) -> None: ... - def xTruncate(self, newsize: int) -> None: ... - def xUnlock(self, level: int) -> None: ... - def xWrite(self, data: bytes, offset: int) -> None: ... + """Wraps access to a file. You only need to derive from this class + if you want the file object returned from :meth:`VFS.xOpen` to + inherit from an existing VFS implementation. + + .. note:: + + All file sizes and offsets are 64 bit quantities even on 32 bit + operating systems.""" + + def excepthook(self, etype: type[BaseException], evalue: BaseException, etraceback: Optional[types.TracebackType]) ->None: + """Called when there has been an exception in a :class:`VFSFile` + routine. The default implementation calls ``sys.excepthook`` and + if that fails then ``PyErr_Display``. The three arguments + correspond to what ``sys.exc_info()`` would return. + + :param etype: The exception type + :param evalue: The exception value + :param etraceback: The exception traceback. Note this + includes all frames all the way up to the thread being started.""" + ... + + def __init__(self, vfs: str, filename: Union[str,URIFilename], flags: List[int]): + """:param vfs: The vfs you want to inherit behaviour from. You can + use an empty string ``""`` to inherit from the default vfs. + :param name: The name of the file being opened. May be an instance of :class:`URIFilename`. + :param flags: A two item list ``[inflags, outflags]`` as detailed in :meth:`VFS.xOpen`. + + :raises ValueError: If the named VFS is not registered. + + .. note:: + + If the VFS that you inherit from supports :ref:`write ahead + logging ` then your :class:`VFSFile` will also support the + xShm methods necessary to implement wal. + + .. seealso:: + + :meth:`VFS.xOpen`""" + ... + + def xCheckReservedLock(self) -> bool: + """Returns True if any database connection (in this or another process) + has a lock other than `SQLITE_LOCK_NONE or SQLITE_LOCK_SHARED + `_.""" + ... + + def xClose(self) -> None: + """Close the database. Note that even if you return an error you should + still close the file. It is safe to call this method multiple + times.""" + ... + + def xDeviceCharacteristics(self) -> int: + """Return `I/O capabilities + `_ (bitwise or of + appropriate values). If you do not implement the function or have an + error then 0 (the SQLite default) is returned.""" + ... + + def xFileControl(self, op: int, ptr: int) -> bool: + """Receives `file control + `_ request typically + issued by :meth:`Connection.filecontrol`. See + :meth:`Connection.filecontrol` for an example of how to pass a + Python object to this routine. + + :param op: A numeric code. Codes below 100 are reserved for SQLite + internal use. + :param ptr: An integer corresponding to a pointer at the C level. 
+ + :returns: A boolean indicating if the op was understood + + As of SQLite 3.6.10, this method is called by SQLite if you have + inherited from an underlying VFSFile. Consequently ensure you pass + any unrecognised codes through to your super class. For example:: + + def xFileControl(self, op, ptr): + if op==1027: + process_quick(ptr) + elif op==1028: + obj=ctypes.py_object.from_address(ptr).value + else: + # this ensures superclass implementation is called + return super(MyFile, self).xFileControl(op, ptr) + # we understood the op + return True""" + ... + + def xFileSize(self) -> int: + """Return the size of the file in bytes. Remember that file sizes are + 64 bit quantities even on 32 bit operating systems.""" + ... + + def xLock(self, level: int) -> None: + """Increase the lock to the level specified which is one of the + `SQLITE_LOCK `_ + family of constants. If you can't increase the lock level because + someone else has locked it, then raise :exc:`BusyError`.""" + ... + + def xRead(self, amount: int, offset: int) -> bytes: + """Read the specified *amount* of data starting at *offset*. You + should make every effort to read all the data requested, or return + an error. If you have the file open for non-blocking I/O or if + signals happen then it is possible for the underlying operating + system to do a partial read. You will need to request the + remaining data. Except for empty files SQLite considers short + reads to be a fatal error. + + :param amount: Number of bytes to read + :param offset: Where to start reading. This number may be 64 bit once the database is larger than 2GB.""" + ... + + def xSectorSize(self) -> int: + """Return the native underlying sector size. SQLite uses the value + returned in determining the default database page size. If you do + not implement the function or have an error then 4096 (the SQLite + default) is returned.""" + ... + + def xSync(self, flags: int) -> None: + """Ensure data is on the disk platters (ie could survive a power + failure immediately after the call returns) with the `sync flags + `_ detailing what + needs to be synced. You can sync more than what is requested.""" + ... + + def xTruncate(self, newsize: int) -> None: + """Set the file length to *newsize* (which may be more or less than the + current length).""" + ... + + def xUnlock(self, level: int) -> None: + """Decrease the lock to the level specified which is one of the + `SQLITE_LOCK `_ + family of constants.""" + ... + + def xWrite(self, data: bytes, offset: int) -> None: + """Write the *data* starting at absolute *offset*. You must write all the data + requested, or return an error. If you have the file open for + non-blocking I/O or if signals happen then it is possible for the + underlying operating system to do a partial write. You will need to + write the remaining data. + + :param offset: Where to start writing. This number may be 64 bit once the database is larger than 2GB.""" + ... + class VFS: - def __init__(self, name: str, base: Optional[str] = None, makedefault: bool = False, maxpathname: int = 1024): ... - def excepthook(self, etype: type[BaseException], evalue: BaseException, etraceback: Optional[TracebackType]) -> Any: ... - def unregister(self) -> None: ... - def xAccess(self, pathname: str, flags: int) -> bool: ... - def xCurrentTime(self) -> float: ... - def xDelete(self, filename: str, syncdir: bool) -> None: ... - def xDlClose(self, handle: int) -> None: ... - def xDlError(self) -> str: ... - def xDlOpen(self, filename: str) -> int: ... 
- def xDlSym(self, handle: int, symbol: str) -> int: ... - def xFullPathname(self, name: str) -> str: ... - def xGetLastError(self) -> Tuple[int, str]: ... - def xGetSystemCall(self, name: str) -> Optional[int]: ... - def xNextSystemCall(self, name: Optional[str]) -> Optional[str]: ... - def xOpen(self, name: Optional[Union[str,URIFilename]], flags: List[int]) -> VFSFile: ... - def xRandomness(self, numbytes: int) -> bytes: ... - def xSetSystemCall(self, name: Optional[str], pointer: int) -> bool: ... - def xSleep(self, microseconds: int) -> int: ... + """Provides operating system access. You can get an overview in the + `SQLite documentation `_. To + create a VFS your Python class must inherit from :class:`VFS`.""" + + def excepthook(self, etype: type[BaseException], evalue: BaseException, etraceback: Optional[types.TracebackType]) -> Any: + """Called when there has been an exception in a :class:`VFS` routine. + The default implementation passes args to ``sys.excepthook`` and if that + fails then ``PyErr_Display``. The three arguments correspond to + what ``sys.exc_info()`` would return.""" + ... + + def __init__(self, name: str, base: Optional[str] = None, makedefault: bool = False, maxpathname: int = 1024): + """:param name: The name to register this vfs under. If the name + already exists then this vfs will replace the prior one of the + same name. Use :meth:`apsw.vfsnames` to get a list of + registered vfs names. + + :param base: If you would like to inherit behaviour from an already registered vfs then give + their name. To inherit from the default vfs, use a zero + length string ``""`` as the name. + + :param makedefault: If true then this vfs will be registered as the default, and will be + used by any opens that don't specify a vfs. + + :param maxpathname: The maximum length of database name in bytes when + represented in UTF-8. If a pathname is passed in longer than + this value then SQLite will not `be able to open it + `__. + + :raises ValueError: If *base* is not *None* and the named vfs is not + currently registered. + + Calls: + * `sqlite3_vfs_register `__ + * `sqlite3_vfs_find `__""" + ... + + def unregister(self) -> None: + """Unregisters the VFS making it unavailable to future database + opens. You do not need to call this as the VFS is automatically + unregistered by when the VFS has no more references or open + databases using it. It is however useful to call if you have made + your VFS be the default and wish to immediately make it be + unavailable. It is safe to call this routine multiple times. + + Calls: `sqlite3_vfs_unregister `__""" + ... + + def xAccess(self, pathname: str, flags: int) -> bool: + """SQLite wants to check access permissions. Return True or False + accordingly. + + :param pathname: File or directory to check + :param flags: One of the `access flags `_""" + ... + + def xCurrentTime(self) -> float: + """Return the `Julian Day Number + `_ as a floating point + number where the integer portion is the day and the fractional part + is the time. Do not adjust for timezone (ie use `UTC + `_).""" + ... + + def xDelete(self, filename: str, syncdir: bool) -> None: + """Delete the named file. If the file is missing then raise an + :exc:`IOError` exception with extendedresult + *SQLITE_IOERR_DELETE_NOENT* + + :param filename: File to delete + + :param syncdir: If True then the directory should be synced + ensuring that the file deletion has been recorded on the disk + platters. 
ie if there was an immediate power failure after this + call returns, on a reboot the file would still be deleted.""" + ... + + def xDlClose(self, handle: int) -> None: + """Close and unload the library corresponding to the handle you + returned from :meth:`~VFS.xDlOpen`. You can use ctypes to do + this:: + + def xDlClose(handle): + # Note leading underscore in _ctypes + _ctypes.dlclose(handle) # Linux/Mac/Unix + _ctypes.FreeLibrary(handle) # Windows""" + ... + + def xDlError(self) -> str: + """Return an error string describing the last error of + :meth:`~VFS.xDlOpen` or :meth:`~VFS.xDlSym` (ie they returned + zero/NULL). If you do not supply this routine then SQLite provides + a generic message. To implement this method, catch exceptions in + :meth:`~VFS.xDlOpen` or :meth:`~VFS.xDlSym`, turn them into + strings, save them, and return them in this routine. If you have + an error in this routine or return None then SQLite's generic + message will be used.""" + ... + + def xDlOpen(self, filename: str) -> int: + """Load the shared library. You should return a number which will be + treated as a void pointer at the C level. On error you should + return 0 (NULL). The number is passed as is to + :meth:`~VFS.xDlSym`/:meth:`~VFS.xDlClose` so it can represent + anything that is convenient for you (eg an index into an + array). You can use ctypes to load a library:: + + def xDlOpen(name): + return ctypes.cdll.LoadLibrary(name)._handle""" + ... + + def xDlSym(self, handle: int, symbol: str) -> int: + """Returns the address of the named symbol which will be called by + SQLite. On error you should return 0 (NULL). You can use ctypes:: + + def xDlSym(ptr, name): + return _ctypes.dlsym (ptr, name) # Linux/Unix/Mac etc (note leading underscore) + return ctypes.win32.kernel32.GetProcAddress (ptr, name) # Windows + + :param handle: The value returned from an earlier :meth:`~VFS.xDlOpen` call + :param symbol: A string""" + ... + + def xFullPathname(self, name: str) -> str: + """Return the absolute pathname for name. You can use ``os.path.abspath`` to do this.""" + ... + + def xGetLastError(self) -> Tuple[int, str]: + """This method is to return an integer error code and (optional) text describing + the last error that happened in this thread. + + .. note:: SQLite 3.12 changed the semantics in an incompatible way from + earlier versions. You will need to rewrite earlier implementations.""" + ... + + def xGetSystemCall(self, name: str) -> Optional[int]: + """Returns a pointer for the current method implementing the named + system call. Return None if the call does not exist.""" + ... + + def xNextSystemCall(self, name: Optional[str]) -> Optional[str]: + """This method is repeatedly called to iterate over all of the system + calls in the vfs. When called with None you should return the + name of the first system call. In subsequent calls return the + name after the one passed in. If name is the last system call + then return None. + + .. note:: + + Because of internal SQLite implementation semantics memory will + be leaked on each call to this function. Consequently you + should build up the list of call names once rather than + repeatedly doing it.""" + ... + + def xOpen(self, name: Optional[Union[str,URIFilename]], flags: List[int]) -> VFSFile: + """This method should return a new file object based on name. You + can return a :class:`VFSFile` from a completely different VFS. + + :param name: File to open. 
Note that *name* may be *None* in which + case you should open a temporary file with a name of your + choosing. May be an instance of :class:`URIFilename`. + + :param flags: A list of two integers ``[inputflags, + outputflags]``. Each integer is one or more of the `open flags + `_ binary orred + together. The ``inputflags`` tells you what SQLite wants. For + example *SQLITE_OPEN_DELETEONCLOSE* means the file should + be automatically deleted when closed. The ``outputflags`` + describes how you actually did open the file. For example if you + opened it read only then *SQLITE_OPEN_READONLY* should be + set.""" + ... + + def xRandomness(self, numbytes: int) -> bytes: + """This method is called once when SQLite needs to seed the random + number generator. It is called on the default VFS only. It is not + called again, even across :meth:`apsw.shutdown` calls. You can + return less than the number of bytes requested including None. If + you return more then the surplus is ignored.""" + ... + + def xSetSystemCall(self, name: Optional[str], pointer: int) -> bool: + """Change a system call used by the VFS. This is useful for testing + and some other scenarios such as sandboxing. + + :param name: The string name of the system call + + :param pointer: A pointer provided as an int. There is no + reference counting or other memory tracking of the pointer. If + you provide one you need to ensure it is around for the lifetime + of this and any other related VFS. + + Raise an exception to return an error. If the system call does + not exist then raise :exc:`NotFoundError`. + + If `name` is None, then all systemcalls are reset to their defaults. This + behaviour is not documented. + + :returns: True if the system call was set. False if the system + call is not known.""" + ... + + def xSleep(self, microseconds: int) -> int: + """Pause execution of the thread for at least the specified number of + microseconds (millionths of a second). This routine is typically called from the busy handler. + + :returns: How many microseconds you actually requested the + operating system to sleep for. For example if your operating + system sleep call only takes seconds then you would have to have + rounded the microseconds number up to the nearest second and + should return that rounded up value.""" + ... + + +if sys.version_info >= (3, 8): + + class VTCursor(Protocol): + """.. note:: + + There is no actual *VTCursor* class - it is shown this way for + documentation convenience and is present as a `typing protocol + `__. + Your cursor instance should implement all the methods documented + here. + + + The :class:`VTCursor` object is used for iterating over a table. + There may be many cursors simultaneously so each one needs to keep + track of where :ref:`Virtual table structure ` + it is. + + .. seealso:: + + :ref:`Virtual table structure `""" + + def Close(self) -> None: + """This is the destructor for the cursor. Note that you must + cleanup. The method will not be called again if you raise an + exception.""" + ... + + def Column(self, number: int) -> SQLiteValue: + """Requests the value of the specified column *number* of the current + row. If *number* is -1 then return the rowid. + + :returns: Must be one one of the :ref:`5 + supported types `""" + ... + + def Eof(self) -> bool: + """Called to ask if we are at the end of the table. It is called after each call to Filter and Next. + + :returns: False if the cursor is at a valid row of data, else True + + .. note:: + + This method can only return True or False to SQLite. 
If you have
+ an exception in the method or provide a non-boolean return then
+ True (no more data) will be returned to SQLite."""
+ ...
+
+ def Filter(self, indexnum: int, indexname: str, constraintargs: Optional[Tuple]) -> None:
+ """This method is always called first to initialize an iteration to the
+ first row of the table. The arguments come from the
+ :meth:`~VTTable.BestIndex` method in the :class:`table `
+ object with constraintargs being a tuple of the constraints you
+ requested. If you always return None in BestIndex then indexnum will
+ be zero, indexname will be None and constraintargs will be empty."""
+ ...
+
+ def Next(self) -> None:
+ """Move the cursor to the next row. Do not raise an exception if there
+ is no next row. Instead return False when :meth:`~VTCursor.Eof` is
+ subsequently called.
+
+ If you said you had indices in your :meth:`VTTable.BestIndex`
+ return, and they were selected for use as provided in the parameters
+ to :meth:`~VTCursor.Filter` then you should move to the next
+ appropriate indexed and constrained row."""
+ ...
+
+ def Rowid(self) -> int:
+ """Return the current rowid."""
+ ...
+
+
+if sys.version_info >= (3, 8):
+
+ class VTModule(Protocol):
+ """.. note::
+
+ There is no actual *VTModule* class - it is shown this way for
+ documentation convenience and is present as a `typing protocol
+ `__.
+ Your module instance should implement all the methods documented here.
+
+ A module instance is used to create the virtual tables. Once you have
+ a module object, you register it with a connection by calling
+ :meth:`Connection.createmodule`::
+
+     # make an instance
+     mymod=MyModuleClass()
+
+     # register the vtable on connection con
+     con.createmodule("modulename", mymod)
+
+     # tell SQLite about the table
+     con.execute("create VIRTUAL table tablename USING modulename('arg1', 2)")
+
+ The create step is to tell SQLite about the existence of the table.
+ Any number of tables referring to the same module can be made this
+ way. Note the (optional) arguments which are passed to the module."""
+
+ def Connect(self, connection: Connection, modulename: str, databasename: str, tablename: str, *args: Tuple[SQLiteValue, ...]) -> Tuple[str, VTTable]:
+ """The parameters and return are identical to
+ :meth:`~VTModule.Create`. This method is called
+ when there are additional references to the table. :meth:`~VTModule.Create` will be called the first time and
+ :meth:`~VTModule.Connect` after that.
+
+ The advice is to create caches, generated data and other
+ heavyweight processing on :meth:`~VTModule.Create` calls and then
+ find and reuse that on the subsequent :meth:`~VTModule.Connect`
+ calls.
+
+ The corresponding call is :meth:`VTTable.Disconnect`. If you have a simple virtual table implementation, then just
+ set :meth:`~VTModule.Connect` to be the same as :meth:`~VTModule.Create`::
+
+     class MyModule:
+
+          def Create(self, connection, modulename, databasename, tablename, *args):
+              # do lots of hard work
+
+          Connect=Create"""
+ ...
+
+ def Create(self, connection: Connection, modulename: str, databasename: str, tablename: str, *args: Tuple[SQLiteValue, ...]) -> Tuple[str, VTTable]:
+ """Called when a table is first created on a :class:`connection
+ `.
+
+ :param connection: An instance of :class:`Connection`
+ :param modulename: The string name under which the module was :meth:`registered `
+ :param databasename: The name of the database. This will be ``main`` for directly opened files and the name specified in
+ `ATTACH `_ statements.
+ :param tablename: Name of the table the user wants to create. + :param args: Any arguments that were specified in the `create virtual table `_ statement. + + :returns: A list of two items. The first is a SQL `create table `_ statement. The + columns are parsed so that SQLite knows what columns and declared types exist for the table. The second item + is an object that implements the :class:`table ` methods. + + The corresponding call is :meth:`VTTable.Destroy`.""" + ... + + +if sys.version_info >= (3, 8): + + class VTTable(Protocol): + """.. note:: + + There is no actual *VTTable* class - it is shown this way for + documentation convenience and is present as a `typing protocol + `__. + Your table instance should implement the methods documented here. + + The :class:`VTTable` object contains knowledge of the indices, makes + cursors and can perform transactions. + + + .. _vtablestructure: + + A virtual table is structured as a series of rows, each of which has + the same columns. The value in a column must be one of the `5 + supported types `_, but the + type can be different between rows for the same column. The virtual + table routines identify the columns by number, starting at zero. + + Each row has a **unique** 64 bit integer `rowid + `_ with the :class:`Cursor + ` routines operating on this number, as well as some of + the :class:`Table ` routines such as :meth:`UpdateChangeRow + `.""" + + def Begin(self) -> None: + """This function is used as part of transactions. You do not have to + provide the method.""" + ... + + def BestIndex(self, constraints: Sequence[Tuple[int, int], ...], orderbys: Sequence[Tuple[int, int], ...]) -> Any: + """This is a complex method. To get going initially, just return + *None* and you will be fine. Implementing this method reduces + the number of rows scanned in your table to satisfy queries, but + only if you have an index or index like mechanism available. + + .. note:: + + The implementation of this method differs slightly from the + `SQLite documentation + `__ + for the C API. You are not passed "unusable" constraints. The + argv/constraintarg positions are not off by one. In the C api, you + have to return position 1 to get something passed to + :meth:`VTCursor.Filter` in position 0. With the APSW + implementation, you return position 0 to get Filter arg 0, + position 1 to get Filter arg 1 etc. + + The purpose of this method is to ask if you have the ability to + determine if a row meets certain constraints that doesn't involve + visiting every row. An example constraint is ``price > 74.99``. In a + traditional SQL database, queries with constraints can be speeded up + `with indices `_. If + you return None, then SQLite will visit every row in your table and + evaluate the constraint itself. Your index choice returned from + BestIndex will also be passed to the :meth:`~VTCursor.Filter` method on your cursor + object. Note that SQLite may call this method multiple times trying + to find the most efficient way of answering a complex query. + + **constraints** + + You will be passed the constraints as a sequence of tuples containing two + items. The first item is the column number and the second item is + the operation. 
+ + Example query: ``select * from foo where price > 74.99 and + quantity<=10 and customer='Acme Widgets'`` + + If customer is column 0, price column 2 and quantity column 5 + then the constraints will be:: + + (2, apsw.SQLITE_INDEX_CONSTRAINT_GT), + (5, apsw.SQLITE_INDEX_CONSTRAINT_LE), + (0, apsw.SQLITE_INDEX_CONSTRAINT_EQ) + + Note that you do not get the value of the constraint (ie "Acme + Widgets", 74.99 and 10 in this example). + + If you do have any suitable indices then you return a sequence the + same length as constraints with the members mapping to the + constraints in order. Each can be one of None, an integer or a tuple + of an integer and a boolean. Conceptually SQLite is giving you a + list of constraints and you are returning a list of the same length + describing how you could satisfy each one. + + Each list item returned corresponding to a constraint is one of: + + None + This means you have no index for that constraint. SQLite + will have to iterate over every row for it. + + integer + This is the argument number for the constraintargs being passed + into the :meth:`~VTCursor.Filter` function of your + :class:`cursor ` (the values "Acme Widgets", 74.99 + and 10 in the example). + + (integer, boolean) + By default SQLite will check what you return. For example if + you said that you had an index on price, SQLite will still + check that each row you returned is greater than 74.99. If you + set the boolean to False then SQLite won't do that double + checking. + + Example query: ``select * from foo where price > 74.99 and + quantity<=10 and customer=='Acme Widgets'``. customer is column 0, + price column 2 and quantity column 5. You can index on customer + equality and price. + + +----------------------------------------+--------------------------------+ + | Constraints (in) | Constraints used (out) | + +========================================+================================+ + | :: | :: | + | | | + | (2, apsw.SQLITE_INDEX_CONSTRAINT_GT), | 1, | + | (5, apsw.SQLITE_INDEX_CONSTRAINT_LE), | None, | + | (0, apsw.SQLITE_INDEX_CONSTRAINT_EQ) | 0 | + | | | + +----------------------------------------+--------------------------------+ + + When your :class:`~VTCursor.Filter` method in the cursor is called, + constraintarg[0] will be "Acme Widgets" (customer constraint value) + and constraintarg[1] will be 74.99 (price constraint value). You can + also return an index number (integer) and index string to use + (SQLite attaches no significance to these values - they are passed + as is to your :meth:`VTCursor.Filter` method as a way for the + BestIndex method to let the :meth:`~VTCursor.Filter` method know + which of your indices or similar mechanism to use. + + **orderbys** + + + The second argument to BestIndex is a sequence of orderbys because + the query requested the results in a certain order. If your data is + already in that order then SQLite can give the results back as + is. If not, then SQLite will have to sort the results first. + + Example query: ``select * from foo order by price desc, quantity asc`` + + Price is column 2, quantity column 5 so orderbys will be:: + + (2, True), # True means descending, False is ascending + (5, False) + + **Return** + + You should return up to 5 items. Items not present in the return have a default value. + + 0: constraints used (default None) + This must either be None or a sequence the same length as + constraints passed in. 
Each item should be as specified above + saying if that constraint is used, and if so which constraintarg + to make the value be in your :meth:`VTCursor.Filter` function. + + 1: index number (default zero) + This value is passed as is to :meth:`VTCursor.Filter` + + 2: index string (default None) + This value is passed as is to :meth:`VTCursor.Filter` + + 3: orderby consumed (default False) + Return True if your output will be in exactly the same order as the orderbys passed in + + 4: estimated cost (default a huge number) + Approximately how many disk operations are needed to provide the + results. SQLite uses the cost to optimise queries. For example if + the query includes *A or B* and A has 2,000 operations and B has 100 + then it is best to evaluate B before A. + + **A complete example** + + Query is ``select * from foo where price>74.99 and quantity<=10 and + customer=="Acme Widgets" order by price desc, quantity asc``. + Customer is column 0, price column 2 and quantity column 5. You can + index on customer equality and price. + + :: + + BestIndex(constraints, orderbys) + + constraints= ( (2, apsw.SQLITE_INDEX_CONSTRAINT_GT), + (5, apsw.SQLITE_INDEX_CONSTRAINT_LE), + (0, apsw.SQLITE_INDEX_CONSTRAINT_EQ) ) + + orderbys= ( (2, True), (5, False) ) + + + # You return + + ( (1, None, 0), # constraints used + 27, # index number + "idx_pr_cust", # index name + False, # results are not in orderbys order + 1000 # about 1000 disk operations to access index + ) + + + # Your Cursor.Filter method will be called with: + + 27, # index number you returned + "idx_pr_cust", # index name you returned + "Acme Widgets", # constraintarg[0] - customer + 74.99 # constraintarg[1] - price""" + ... + + def Commit(self) -> None: + """This function is used as part of transactions. You do not have to + provide the method.""" + ... + + def Destroy(self) -> None: + """The opposite of :meth:`VTModule.Create`. This method is called when + the table is no longer used. Note that you must always release + resources even if you intend to return an error, as it will not be + called again on error. SQLite may also leak memory + if you return an error.""" + ... + + def Disconnect(self) -> None: + """The opposite of :meth:`VTModule.Connect`. This method is called when + a reference to a virtual table is no longer used, but :meth:`VTTable.Destroy` will + be called when the table is no longer used.""" + ... + + def FindFunction(self, name: str, nargs: int): + """Called to find if the virtual table has its own implementation of a + particular scalar function. You should return the function if you + have it, else return None. You do not have to provide this method. + + This method is called while SQLite is `preparing + `_ a query. If a query is + in the :ref:`statement cache ` then *FindFunction* + won't be called again. If you want to return different + implementations for the same function over time then you will need + to disable the :ref:`statement cache `. + + :param name: The function name + :param nargs: How many arguments the function takes + + .. seealso:: + + * :meth:`Connection.overloadfunction`""" + ... + + def Open(self) -> VTCursor: + """Returns a :class:`cursor ` object.""" + ... + + def Rename(self, newname: str) -> None: + """Notification that the table will be given a new name. If you return + without raising an exception, then SQLite renames the table (you + don't have to do anything). If you raise an exception then the + renaming is prevented. You do not have to provide this method.""" + ... 
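+
+ Pulling the cursor, module and table protocols together, a minimal
+ read-only virtual table could look like the following sketch (the
+ module name ``pylist`` and the data are illustrative, and error
+ handling is omitted)::
+
+     class Module:  # VTModule protocol
+         def Create(self, connection, modulename, databasename, tablename, *args):
+             return "create table x(value)", Table()
+         Connect = Create
+
+     class Table:  # VTTable protocol
+         data = [10, 20, 30]
+         def BestIndex(self, constraints, orderbys):
+             return None  # no indices - SQLite visits every row
+         def Open(self):
+             return Cursor(self)
+         def Disconnect(self):
+             pass
+         Destroy = Disconnect
+
+     class Cursor:  # VTCursor protocol
+         def __init__(self, table):
+             self.table = table
+         def Filter(self, indexnum, indexname, constraintargs):
+             self.pos = 0
+         def Eof(self):
+             return self.pos >= len(self.table.data)
+         def Column(self, number):
+             return self.pos if number == -1 else self.table.data[self.pos]
+         def Next(self):
+             self.pos += 1
+         def Rowid(self):
+             return self.pos
+         def Close(self):
+             pass
+
+     connection.createmodule("pylist", Module())
+     connection.execute("create virtual table demo using pylist()")
+     print(connection.execute("select value from demo").fetchall())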
+
+ def Rollback(self) -> None:
+ """This function is used as part of transactions. You do not have to
+ provide the method."""
+ ...
+
+ def Sync(self) -> None:
+ """This function is used as part of transactions. You do not have to
+ provide the method."""
+ ...
+
+ def UpdateChangeRow(self, row: int, newrowid: int, fields: Tuple[SQLiteValue, ...]):
+ """Change an existing row. You may also need to change the rowid - for example if the query was
+ ``UPDATE table SET rowid=rowid+100 WHERE ...``
+
+ :param row: The existing 64 bit integer rowid
+ :param newrowid: If not the same as *row* then also change the rowid to this.
+ :param fields: A tuple of values the same length and order as columns in your table"""
+ ...
+
+ def UpdateDeleteRow(self, rowid: int):
+ """Delete the row with the specified *rowid*.
+
+ :param rowid: 64 bit integer"""
+ ...
+
+ def UpdateInsertRow(self, rowid: Optional[int], fields: Tuple[SQLiteValue, ...]) -> Optional[int]:
+ """Insert a row with the specified *rowid*.
+
+ :param rowid: *None* if you should choose the rowid yourself, else a 64 bit integer
+ :param fields: A tuple of values the same length and order as columns in your table
+
+ :returns: If *rowid* was *None* then return the id you assigned
+ to the row. If *rowid* was not *None* then the return value
+ is ignored."""
+ ...
+
class zeroblob:
- def __init__(self, size: int): ...
- def length(self) -> int: ...
+ """If you want to insert a blob into a row, you previously needed to
+ supply the entire blob in one go. To read just one byte also
+ required retrieving the blob in its entirety. For example to insert
+ a 100MB file you would have done::
+
+     largedata=open("largefile", "rb").read()
+     cur.execute("insert into foo values(?)", (largedata,))
+
+ SQLite 3.5 allowed for incremental Blob I/O so you can read and
+ write blobs in small amounts. You cannot change the size of a blob
+ so you need to reserve space which you do through zeroblob which
+ creates a blob of the specified size but full of zero bytes. For
+ example you would reserve space for your 100MB blob in one of these two
+ ways::
+
+     cur.execute("insert into foo values(zeroblob(100000000))")
+     cur.execute("insert into foo values(?)",
+                 (apsw.zeroblob(100000000),))
+
+ This class is used for the second way. Once a blob exists in the
+ database, you then use the :class:`Blob` class to read and write its
+ contents."""
+
+ def __init__(self, size: int):
+ """:param size: Number of zeroed bytes to create"""
+ ...
+
+ def length(self) -> int:
+ """Size of zero blob in bytes."""
+ ...
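+
+ A hedged sketch of reserving space and then writing into it
+ incrementally (the ``foo`` table is illustrative, and
+ :meth:`Connection.blobopen` is part of the wider APSW API rather
+ than something shown in this diff)::
+
+     connection.execute("create table foo(b)")
+     connection.execute("insert into foo values(?)", (apsw.zeroblob(1024),))
+     rowid = connection.last_insert_rowid()
+
+     blob = connection.blobopen("main", "foo", "b", rowid, True)  # writeable
+     blob.write(b"\x01" * 512)  # fill the first half; the rest stays zero
+     blob.close()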
+ + + +SQLITE_ABORT: int = 4 +"""For `Result Codes '__""" +SQLITE_ABORT_ROLLBACK: int = 516 +"""For `Extended Result Codes '__""" +SQLITE_ACCESS_EXISTS: int = 0 +"""For `Flags for the xAccess VFS method '__""" +SQLITE_ACCESS_READ: int = 2 +"""For `Flags for the xAccess VFS method '__""" +SQLITE_ACCESS_READWRITE: int = 1 +"""For `Flags for the xAccess VFS method '__""" +SQLITE_ALTER_TABLE: int = 26 +"""For `Authorizer Action Codes '__""" +SQLITE_ANALYZE: int = 28 +"""For `Authorizer Action Codes '__""" +SQLITE_ATTACH: int = 24 +"""For `Authorizer Action Codes '__""" +SQLITE_AUTH: int = 23 +"""For `Result Codes '__""" +SQLITE_AUTH_USER: int = 279 +"""For `Extended Result Codes '__""" +SQLITE_BUSY: int = 5 +"""For `Result Codes '__""" +SQLITE_BUSY_RECOVERY: int = 261 +"""For `Extended Result Codes '__""" +SQLITE_BUSY_SNAPSHOT: int = 517 +"""For `Extended Result Codes '__""" +SQLITE_BUSY_TIMEOUT: int = 773 +"""For `Extended Result Codes '__""" +SQLITE_CANTOPEN: int = 14 +"""For `Result Codes '__""" +SQLITE_CANTOPEN_CONVPATH: int = 1038 +"""For `Extended Result Codes '__""" +SQLITE_CANTOPEN_DIRTYWAL: int = 1294 +"""For `Extended Result Codes '__""" +SQLITE_CANTOPEN_FULLPATH: int = 782 +"""For `Extended Result Codes '__""" +SQLITE_CANTOPEN_ISDIR: int = 526 +"""For `Extended Result Codes '__""" +SQLITE_CANTOPEN_NOTEMPDIR: int = 270 +"""For `Extended Result Codes '__""" +SQLITE_CANTOPEN_SYMLINK: int = 1550 +"""For `Extended Result Codes '__""" +SQLITE_CHECKPOINT_FULL: int = 1 +"""For `Checkpoint Mode Values '__""" +SQLITE_CHECKPOINT_PASSIVE: int = 0 +"""For `Checkpoint Mode Values '__""" +SQLITE_CHECKPOINT_RESTART: int = 2 +"""For `Checkpoint Mode Values '__""" +SQLITE_CHECKPOINT_TRUNCATE: int = 3 +"""For `Checkpoint Mode Values '__""" +SQLITE_CONFIG_COVERING_INDEX_SCAN: int = 20 +"""For `Configuration Options '__""" +SQLITE_CONFIG_GETMALLOC: int = 5 +"""For `Configuration Options '__""" +SQLITE_CONFIG_GETMUTEX: int = 11 +"""For `Configuration Options '__""" +SQLITE_CONFIG_GETPCACHE: int = 15 +"""For `Configuration Options '__""" +SQLITE_CONFIG_GETPCACHE2: int = 19 +"""For `Configuration Options '__""" +SQLITE_CONFIG_HEAP: int = 8 +"""For `Configuration Options '__""" +SQLITE_CONFIG_LOG: int = 16 +"""For `Configuration Options '__""" +SQLITE_CONFIG_LOOKASIDE: int = 13 +"""For `Configuration Options '__""" +SQLITE_CONFIG_MALLOC: int = 4 +"""For `Configuration Options '__""" +SQLITE_CONFIG_MEMDB_MAXSIZE: int = 29 +"""For `Configuration Options '__""" +SQLITE_CONFIG_MEMSTATUS: int = 9 +"""For `Configuration Options '__""" +SQLITE_CONFIG_MMAP_SIZE: int = 22 +"""For `Configuration Options '__""" +SQLITE_CONFIG_MULTITHREAD: int = 2 +"""For `Configuration Options '__""" +SQLITE_CONFIG_MUTEX: int = 10 +"""For `Configuration Options '__""" +SQLITE_CONFIG_PAGECACHE: int = 7 +"""For `Configuration Options '__""" +SQLITE_CONFIG_PCACHE: int = 14 +"""For `Configuration Options '__""" +SQLITE_CONFIG_PCACHE2: int = 18 +"""For `Configuration Options '__""" +SQLITE_CONFIG_PCACHE_HDRSZ: int = 24 +"""For `Configuration Options '__""" +SQLITE_CONFIG_PMASZ: int = 25 +"""For `Configuration Options '__""" +SQLITE_CONFIG_SCRATCH: int = 6 +"""For `Configuration Options '__""" +SQLITE_CONFIG_SERIALIZED: int = 3 +"""For `Configuration Options '__""" +SQLITE_CONFIG_SINGLETHREAD: int = 1 +"""For `Configuration Options '__""" +SQLITE_CONFIG_SMALL_MALLOC: int = 27 +"""For `Configuration Options '__""" +SQLITE_CONFIG_SORTERREF_SIZE: int = 28 +"""For `Configuration Options '__""" +SQLITE_CONFIG_SQLLOG: int = 21 +"""For 
`Configuration Options '__""" +SQLITE_CONFIG_STMTJRNL_SPILL: int = 26 +"""For `Configuration Options '__""" +SQLITE_CONFIG_URI: int = 17 +"""For `Configuration Options '__""" +SQLITE_CONFIG_WIN32_HEAPSIZE: int = 23 +"""For `Configuration Options '__""" +SQLITE_CONSTRAINT: int = 19 +"""For `Result Codes '__""" +SQLITE_CONSTRAINT_CHECK: int = 275 +"""For `Extended Result Codes '__""" +SQLITE_CONSTRAINT_COMMITHOOK: int = 531 +"""For `Extended Result Codes '__""" +SQLITE_CONSTRAINT_DATATYPE: int = 3091 +"""For `Extended Result Codes '__""" +SQLITE_CONSTRAINT_FOREIGNKEY: int = 787 +"""For `Extended Result Codes '__""" +SQLITE_CONSTRAINT_FUNCTION: int = 1043 +"""For `Extended Result Codes '__""" +SQLITE_CONSTRAINT_NOTNULL: int = 1299 +"""For `Extended Result Codes '__""" +SQLITE_CONSTRAINT_PINNED: int = 2835 +"""For `Extended Result Codes '__""" +SQLITE_CONSTRAINT_PRIMARYKEY: int = 1555 +"""For `Extended Result Codes '__""" +SQLITE_CONSTRAINT_ROWID: int = 2579 +"""For `Extended Result Codes '__""" +SQLITE_CONSTRAINT_TRIGGER: int = 1811 +"""For `Extended Result Codes '__""" +SQLITE_CONSTRAINT_UNIQUE: int = 2067 +"""For `Extended Result Codes '__""" +SQLITE_CONSTRAINT_VTAB: int = 2323 +"""For `Extended Result Codes '__""" +SQLITE_COPY: int = 0 +"""For `Authorizer Action Codes '__""" +SQLITE_CORRUPT: int = 11 +"""For `Result Codes '__""" +SQLITE_CORRUPT_INDEX: int = 779 +"""For `Extended Result Codes '__""" +SQLITE_CORRUPT_SEQUENCE: int = 523 +"""For `Extended Result Codes '__""" +SQLITE_CORRUPT_VTAB: int = 267 +"""For `Extended Result Codes '__""" +SQLITE_CREATE_INDEX: int = 1 +"""For `Authorizer Action Codes '__""" +SQLITE_CREATE_TABLE: int = 2 +"""For `Authorizer Action Codes '__""" +SQLITE_CREATE_TEMP_INDEX: int = 3 +"""For `Authorizer Action Codes '__""" +SQLITE_CREATE_TEMP_TABLE: int = 4 +"""For `Authorizer Action Codes '__""" +SQLITE_CREATE_TEMP_TRIGGER: int = 5 +"""For `Authorizer Action Codes '__""" +SQLITE_CREATE_TEMP_VIEW: int = 6 +"""For `Authorizer Action Codes '__""" +SQLITE_CREATE_TRIGGER: int = 7 +"""For `Authorizer Action Codes '__""" +SQLITE_CREATE_VIEW: int = 8 +"""For `Authorizer Action Codes '__""" +SQLITE_CREATE_VTABLE: int = 29 +"""For `Authorizer Action Codes '__""" +SQLITE_DBCONFIG_DEFENSIVE: int = 1010 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_DQS_DDL: int = 1014 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_DQS_DML: int = 1013 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_ENABLE_FKEY: int = 1002 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER: int = 1004 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION: int = 1005 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_ENABLE_QPSG: int = 1007 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_ENABLE_TRIGGER: int = 1003 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_ENABLE_VIEW: int = 1015 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_LEGACY_ALTER_TABLE: int = 1012 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_LEGACY_FILE_FORMAT: int = 1016 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_LOOKASIDE: int = 1001 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_MAINDBNAME: int = 1000 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_MAX: 
int = 1017 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE: int = 1006 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_RESET_DATABASE: int = 1009 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_TRIGGER_EQP: int = 1008 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_TRUSTED_SCHEMA: int = 1017 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBCONFIG_WRITABLE_SCHEMA: int = 1011 +"""For `Database Connection Configuration Options '__""" +SQLITE_DBSTATUS_CACHE_HIT: int = 7 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_CACHE_MISS: int = 8 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_CACHE_SPILL: int = 12 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_CACHE_USED: int = 1 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_CACHE_USED_SHARED: int = 11 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_CACHE_WRITE: int = 9 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_DEFERRED_FKS: int = 10 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_LOOKASIDE_HIT: int = 4 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL: int = 6 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE: int = 5 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_LOOKASIDE_USED: int = 0 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_MAX: int = 12 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_SCHEMA_USED: int = 2 +"""For `Status Parameters for database connections '__""" +SQLITE_DBSTATUS_STMT_USED: int = 3 +"""For `Status Parameters for database connections '__""" +SQLITE_DELETE: int = 9 +"""For `Authorizer Action Codes '__""" +SQLITE_DENY: int = 1 +"""For `Authorizer Return Codes '__""" +SQLITE_DETACH: int = 25 +"""For `Authorizer Action Codes '__""" +SQLITE_DONE: int = 101 +"""For `Result Codes '__""" +SQLITE_DROP_INDEX: int = 10 +"""For `Authorizer Action Codes '__""" +SQLITE_DROP_TABLE: int = 11 +"""For `Authorizer Action Codes '__""" +SQLITE_DROP_TEMP_INDEX: int = 12 +"""For `Authorizer Action Codes '__""" +SQLITE_DROP_TEMP_TABLE: int = 13 +"""For `Authorizer Action Codes '__""" +SQLITE_DROP_TEMP_TRIGGER: int = 14 +"""For `Authorizer Action Codes '__""" +SQLITE_DROP_TEMP_VIEW: int = 15 +"""For `Authorizer Action Codes '__""" +SQLITE_DROP_TRIGGER: int = 16 +"""For `Authorizer Action Codes '__""" +SQLITE_DROP_VIEW: int = 17 +"""For `Authorizer Action Codes '__""" +SQLITE_DROP_VTABLE: int = 30 +"""For `Authorizer Action Codes '__""" +SQLITE_EMPTY: int = 16 +"""For `Result Codes '__""" +SQLITE_ERROR: int = 1 +"""For `Result Codes '__""" +SQLITE_ERROR_MISSING_COLLSEQ: int = 257 +"""For `Extended Result Codes '__""" +SQLITE_ERROR_RETRY: int = 513 +"""For `Extended Result Codes '__""" +SQLITE_ERROR_SNAPSHOT: int = 769 +"""For `Extended Result Codes '__""" +SQLITE_FAIL: int = 3 +"""For `Conflict resolution modes '__""" +SQLITE_FCNTL_BEGIN_ATOMIC_WRITE: int = 31 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_BUSYHANDLER: int = 15 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_CHUNK_SIZE: int = 6 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_CKPT_DONE: int = 37 +"""For `Standard File Control 
Opcodes '__""" +SQLITE_FCNTL_CKPT_START: int = 39 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_CKSM_FILE: int = 41 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_COMMIT_ATOMIC_WRITE: int = 32 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_COMMIT_PHASETWO: int = 22 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_DATA_VERSION: int = 35 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_EXTERNAL_READER: int = 40 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_FILE_POINTER: int = 7 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_GET_LOCKPROXYFILE: int = 2 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_HAS_MOVED: int = 20 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_JOURNAL_POINTER: int = 28 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_LAST_ERRNO: int = 4 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_LOCKSTATE: int = 1 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_LOCK_TIMEOUT: int = 34 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_MMAP_SIZE: int = 18 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_OVERWRITE: int = 11 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_PDB: int = 30 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_PERSIST_WAL: int = 10 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_POWERSAFE_OVERWRITE: int = 13 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_PRAGMA: int = 14 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_RBU: int = 26 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_RESERVE_BYTES: int = 38 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE: int = 33 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_SET_LOCKPROXYFILE: int = 3 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_SIZE_HINT: int = 5 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_SIZE_LIMIT: int = 36 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_SYNC: int = 21 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_SYNC_OMITTED: int = 8 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_TEMPFILENAME: int = 16 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_TRACE: int = 19 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_VFSNAME: int = 12 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_VFS_POINTER: int = 27 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_WAL_BLOCK: int = 24 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_WIN32_AV_RETRY: int = 9 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_WIN32_GET_HANDLE: int = 29 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_WIN32_SET_HANDLE: int = 23 +"""For `Standard File Control Opcodes '__""" +SQLITE_FCNTL_ZIPVFS: int = 25 +"""For `Standard File Control Opcodes '__""" +SQLITE_FORMAT: int = 24 +"""For `Result Codes '__""" +SQLITE_FULL: int = 13 +"""For `Result Codes '__""" +SQLITE_FUNCTION: int = 31 +"""For `Authorizer Action Codes '__""" +SQLITE_IGNORE: int = 2 +"""For `Authorizer Return Codes '__""" +SQLITE_INDEX_CONSTRAINT_EQ: int = 2 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_FUNCTION: int = 150 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_GE: int = 32 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_GLOB: int = 66 +"""For `Virtual 
Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_GT: int = 4 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_IS: int = 72 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_ISNOT: int = 69 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_ISNOTNULL: int = 70 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_ISNULL: int = 71 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_LE: int = 8 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_LIKE: int = 65 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_LIMIT: int = 73 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_LT: int = 16 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_MATCH: int = 64 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_NE: int = 68 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_OFFSET: int = 74 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_CONSTRAINT_REGEXP: int = 67 +"""For `Virtual Table Constraint Operator Codes '__""" +SQLITE_INDEX_SCAN_UNIQUE: int = 1 +"""For `Virtual Table Scan Flags '__""" +SQLITE_INSERT: int = 18 +"""For `Authorizer Action Codes '__""" +SQLITE_INTERNAL: int = 2 +"""For `Result Codes '__""" +SQLITE_INTERRUPT: int = 9 +"""For `Result Codes '__""" +SQLITE_IOCAP_ATOMIC: int = 1 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_ATOMIC16K: int = 64 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_ATOMIC1K: int = 4 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_ATOMIC2K: int = 8 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_ATOMIC32K: int = 128 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_ATOMIC4K: int = 16 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_ATOMIC512: int = 2 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_ATOMIC64K: int = 256 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_ATOMIC8K: int = 32 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_BATCH_ATOMIC: int = 16384 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_IMMUTABLE: int = 8192 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_POWERSAFE_OVERWRITE: int = 4096 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_SAFE_APPEND: int = 512 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_SEQUENTIAL: int = 1024 +"""For `Device Characteristics '__""" +SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN: int = 2048 +"""For `Device Characteristics '__""" +SQLITE_IOERR: int = 10 +"""For `Result Codes '__""" +SQLITE_IOERR_ACCESS: int = 3338 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_AUTH: int = 7178 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_BEGIN_ATOMIC: int = 7434 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_BLOCKED: int = 2826 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_CHECKRESERVEDLOCK: int = 3594 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_CLOSE: int = 4106 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_COMMIT_ATOMIC: int = 7690 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_CONVPATH: int = 6666 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_CORRUPTFS: int = 8458 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_DATA: int = 8202 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_DELETE: int = 2570 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_DELETE_NOENT: 
int = 5898 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_DIR_CLOSE: int = 4362 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_DIR_FSYNC: int = 1290 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_FSTAT: int = 1802 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_FSYNC: int = 1034 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_GETTEMPPATH: int = 6410 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_LOCK: int = 3850 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_MMAP: int = 6154 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_NOMEM: int = 3082 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_RDLOCK: int = 2314 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_READ: int = 266 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_ROLLBACK_ATOMIC: int = 7946 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_SEEK: int = 5642 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_SHMLOCK: int = 5130 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_SHMMAP: int = 5386 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_SHMOPEN: int = 4618 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_SHMSIZE: int = 4874 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_SHORT_READ: int = 522 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_TRUNCATE: int = 1546 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_UNLOCK: int = 2058 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_VNODE: int = 6922 +"""For `Extended Result Codes '__""" +SQLITE_IOERR_WRITE: int = 778 +"""For `Extended Result Codes '__""" +SQLITE_LIMIT_ATTACHED: int = 7 +"""For `Run-Time Limit Categories '__""" +SQLITE_LIMIT_COLUMN: int = 2 +"""For `Run-Time Limit Categories '__""" +SQLITE_LIMIT_COMPOUND_SELECT: int = 4 +"""For `Run-Time Limit Categories '__""" +SQLITE_LIMIT_EXPR_DEPTH: int = 3 +"""For `Run-Time Limit Categories '__""" +SQLITE_LIMIT_FUNCTION_ARG: int = 6 +"""For `Run-Time Limit Categories '__""" +SQLITE_LIMIT_LENGTH: int = 0 +"""For `Run-Time Limit Categories '__""" +SQLITE_LIMIT_LIKE_PATTERN_LENGTH: int = 8 +"""For `Run-Time Limit Categories '__""" +SQLITE_LIMIT_SQL_LENGTH: int = 1 +"""For `Run-Time Limit Categories '__""" +SQLITE_LIMIT_TRIGGER_DEPTH: int = 10 +"""For `Run-Time Limit Categories '__""" +SQLITE_LIMIT_VARIABLE_NUMBER: int = 9 +"""For `Run-Time Limit Categories '__""" +SQLITE_LIMIT_VDBE_OP: int = 5 +"""For `Run-Time Limit Categories '__""" +SQLITE_LIMIT_WORKER_THREADS: int = 11 +"""For `Run-Time Limit Categories '__""" +SQLITE_LOCKED: int = 6 +"""For `Result Codes '__""" +SQLITE_LOCKED_SHAREDCACHE: int = 262 +"""For `Extended Result Codes '__""" +SQLITE_LOCKED_VTAB: int = 518 +"""For `Extended Result Codes '__""" +SQLITE_LOCK_EXCLUSIVE: int = 4 +"""For `File Locking Levels '__""" +SQLITE_LOCK_NONE: int = 0 +"""For `File Locking Levels '__""" +SQLITE_LOCK_PENDING: int = 3 +"""For `File Locking Levels '__""" +SQLITE_LOCK_RESERVED: int = 2 +"""For `File Locking Levels '__""" +SQLITE_LOCK_SHARED: int = 1 +"""For `File Locking Levels '__""" +SQLITE_MISMATCH: int = 20 +"""For `Result Codes '__""" +SQLITE_MISUSE: int = 21 +"""For `Result Codes '__""" +SQLITE_NOLFS: int = 22 +"""For `Result Codes '__""" +SQLITE_NOMEM: int = 7 +"""For `Result Codes '__""" +SQLITE_NOTADB: int = 26 +"""For `Result Codes '__""" +SQLITE_NOTFOUND: int = 12 +"""For `Result Codes '__""" +SQLITE_NOTICE: int = 27 +"""For `Result Codes '__""" +SQLITE_NOTICE_RECOVER_ROLLBACK: int = 539 +"""For `Extended Result Codes '__""" +SQLITE_NOTICE_RECOVER_WAL: int = 283 +"""For `Extended Result Codes '__""" 
+SQLITE_OK: int = 0 +"""For `Result Codes '__""" +SQLITE_OK_LOAD_PERMANENTLY: int = 256 +"""For `Extended Result Codes '__""" +SQLITE_OK_SYMLINK: int = 512 +"""For `Extended Result Codes '__""" +SQLITE_OPEN_AUTOPROXY: int = 32 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_CREATE: int = 4 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_DELETEONCLOSE: int = 8 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_EXCLUSIVE: int = 16 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_EXRESCODE: int = 33554432 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_FULLMUTEX: int = 65536 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_MAIN_DB: int = 256 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_MAIN_JOURNAL: int = 2048 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_MEMORY: int = 128 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_NOFOLLOW: int = 16777216 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_NOMUTEX: int = 32768 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_PRIVATECACHE: int = 262144 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_READONLY: int = 1 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_READWRITE: int = 2 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_SHAREDCACHE: int = 131072 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_SUBJOURNAL: int = 8192 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_SUPER_JOURNAL: int = 16384 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_TEMP_DB: int = 512 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_TEMP_JOURNAL: int = 4096 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_TRANSIENT_DB: int = 1024 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_URI: int = 64 +"""For `Flags For File Open Operations '__""" +SQLITE_OPEN_WAL: int = 524288 +"""For `Flags For File Open Operations '__""" +SQLITE_PERM: int = 3 +"""For `Result Codes '__""" +SQLITE_PRAGMA: int = 19 +"""For `Authorizer Action Codes '__""" +SQLITE_PREPARE_NORMALIZE: int = 2 +"""For `Prepare Flags '__""" +SQLITE_PREPARE_NO_VTAB: int = 4 +"""For `Prepare Flags '__""" +SQLITE_PREPARE_PERSISTENT: int = 1 +"""For `Prepare Flags '__""" +SQLITE_PROTOCOL: int = 15 +"""For `Result Codes '__""" +SQLITE_RANGE: int = 25 +"""For `Result Codes '__""" +SQLITE_READ: int = 20 +"""For `Authorizer Action Codes '__""" +SQLITE_READONLY: int = 8 +"""For `Result Codes '__""" +SQLITE_READONLY_CANTINIT: int = 1288 +"""For `Extended Result Codes '__""" +SQLITE_READONLY_CANTLOCK: int = 520 +"""For `Extended Result Codes '__""" +SQLITE_READONLY_DBMOVED: int = 1032 +"""For `Extended Result Codes '__""" +SQLITE_READONLY_DIRECTORY: int = 1544 +"""For `Extended Result Codes '__""" +SQLITE_READONLY_RECOVERY: int = 264 +"""For `Extended Result Codes '__""" +SQLITE_READONLY_ROLLBACK: int = 776 +"""For `Extended Result Codes '__""" +SQLITE_RECURSIVE: int = 33 +"""For `Authorizer Action Codes '__""" +SQLITE_REINDEX: int = 27 +"""For `Authorizer Action Codes '__""" +SQLITE_REPLACE: int = 5 +"""For `Conflict resolution modes '__""" +SQLITE_ROLLBACK: int = 1 +"""For `Conflict resolution modes '__""" +SQLITE_ROW: int = 100 +"""For `Result Codes '__""" +SQLITE_SAVEPOINT: int = 32 +"""For `Authorizer Action Codes '__""" +SQLITE_SCHEMA: int = 17 +"""For `Result Codes '__""" +SQLITE_SELECT: int = 21 +"""For `Authorizer Action Codes '__""" +SQLITE_SHM_EXCLUSIVE: int = 8 +"""For `Flags for the xShmLock 
VFS method '__""" +SQLITE_SHM_LOCK: int = 2 +"""For `Flags for the xShmLock VFS method '__""" +SQLITE_SHM_SHARED: int = 4 +"""For `Flags for the xShmLock VFS method '__""" +SQLITE_SHM_UNLOCK: int = 1 +"""For `Flags for the xShmLock VFS method '__""" +SQLITE_STATUS_MALLOC_COUNT: int = 9 +"""For `Status Parameters '__""" +SQLITE_STATUS_MALLOC_SIZE: int = 5 +"""For `Status Parameters '__""" +SQLITE_STATUS_MEMORY_USED: int = 0 +"""For `Status Parameters '__""" +SQLITE_STATUS_PAGECACHE_OVERFLOW: int = 2 +"""For `Status Parameters '__""" +SQLITE_STATUS_PAGECACHE_SIZE: int = 7 +"""For `Status Parameters '__""" +SQLITE_STATUS_PAGECACHE_USED: int = 1 +"""For `Status Parameters '__""" +SQLITE_STATUS_PARSER_STACK: int = 6 +"""For `Status Parameters '__""" +SQLITE_STATUS_SCRATCH_OVERFLOW: int = 4 +"""For `Status Parameters '__""" +SQLITE_STATUS_SCRATCH_SIZE: int = 8 +"""For `Status Parameters '__""" +SQLITE_STATUS_SCRATCH_USED: int = 3 +"""For `Status Parameters '__""" +SQLITE_SYNC_DATAONLY: int = 16 +"""For `Synchronization Type Flags '__""" +SQLITE_SYNC_FULL: int = 3 +"""For `Synchronization Type Flags '__""" +SQLITE_SYNC_NORMAL: int = 2 +"""For `Synchronization Type Flags '__""" +SQLITE_TOOBIG: int = 18 +"""For `Result Codes '__""" +SQLITE_TRANSACTION: int = 22 +"""For `Authorizer Action Codes '__""" +SQLITE_TXN_NONE: int = 0 +"""For `Allowed return values from [sqlite3_txn_state()] '__""" +SQLITE_TXN_READ: int = 1 +"""For `Allowed return values from [sqlite3_txn_state()] '__""" +SQLITE_TXN_WRITE: int = 2 +"""For `Allowed return values from [sqlite3_txn_state()] '__""" +SQLITE_UPDATE: int = 23 +"""For `Authorizer Action Codes '__""" +SQLITE_VTAB_CONSTRAINT_SUPPORT: int = 1 +"""For `Virtual Table Configuration Options '__""" +SQLITE_VTAB_DIRECTONLY: int = 3 +"""For `Virtual Table Configuration Options '__""" +SQLITE_VTAB_INNOCUOUS: int = 2 +"""For `Virtual Table Configuration Options '__""" +SQLITE_WARNING: int = 28 +"""For `Result Codes '__""" +SQLITE_WARNING_AUTOINDEX: int = 284 +"""For `Extended Result Codes '__""" -SQLITE_ABORT: int -SQLITE_ABORT_ROLLBACK: int -SQLITE_ACCESS_EXISTS: int -SQLITE_ACCESS_READ: int -SQLITE_ACCESS_READWRITE: int -SQLITE_ALTER_TABLE: int -SQLITE_ANALYZE: int -SQLITE_ATTACH: int -SQLITE_AUTH: int -SQLITE_AUTH_USER: int -SQLITE_BUSY: int -SQLITE_BUSY_RECOVERY: int -SQLITE_BUSY_SNAPSHOT: int -SQLITE_BUSY_TIMEOUT: int -SQLITE_CANTOPEN: int -SQLITE_CANTOPEN_CONVPATH: int -SQLITE_CANTOPEN_DIRTYWAL: int -SQLITE_CANTOPEN_FULLPATH: int -SQLITE_CANTOPEN_ISDIR: int -SQLITE_CANTOPEN_NOTEMPDIR: int -SQLITE_CANTOPEN_SYMLINK: int -SQLITE_CHECKPOINT_FULL: int -SQLITE_CHECKPOINT_PASSIVE: int -SQLITE_CHECKPOINT_RESTART: int -SQLITE_CHECKPOINT_TRUNCATE: int -SQLITE_CONFIG_COVERING_INDEX_SCAN: int -SQLITE_CONFIG_GETMALLOC: int -SQLITE_CONFIG_GETMUTEX: int -SQLITE_CONFIG_GETPCACHE: int -SQLITE_CONFIG_GETPCACHE2: int -SQLITE_CONFIG_HEAP: int -SQLITE_CONFIG_LOG: int -SQLITE_CONFIG_LOOKASIDE: int -SQLITE_CONFIG_MALLOC: int -SQLITE_CONFIG_MEMDB_MAXSIZE: int -SQLITE_CONFIG_MEMSTATUS: int -SQLITE_CONFIG_MMAP_SIZE: int -SQLITE_CONFIG_MULTITHREAD: int -SQLITE_CONFIG_MUTEX: int -SQLITE_CONFIG_PAGECACHE: int -SQLITE_CONFIG_PCACHE: int -SQLITE_CONFIG_PCACHE2: int -SQLITE_CONFIG_PCACHE_HDRSZ: int -SQLITE_CONFIG_PMASZ: int -SQLITE_CONFIG_SCRATCH: int -SQLITE_CONFIG_SERIALIZED: int -SQLITE_CONFIG_SINGLETHREAD: int -SQLITE_CONFIG_SMALL_MALLOC: int -SQLITE_CONFIG_SORTERREF_SIZE: int -SQLITE_CONFIG_SQLLOG: int -SQLITE_CONFIG_STMTJRNL_SPILL: int -SQLITE_CONFIG_URI: int -SQLITE_CONFIG_WIN32_HEAPSIZE: int 
-SQLITE_CONSTRAINT: int -SQLITE_CONSTRAINT_CHECK: int -SQLITE_CONSTRAINT_COMMITHOOK: int -SQLITE_CONSTRAINT_DATATYPE: int -SQLITE_CONSTRAINT_FOREIGNKEY: int -SQLITE_CONSTRAINT_FUNCTION: int -SQLITE_CONSTRAINT_NOTNULL: int -SQLITE_CONSTRAINT_PINNED: int -SQLITE_CONSTRAINT_PRIMARYKEY: int -SQLITE_CONSTRAINT_ROWID: int -SQLITE_CONSTRAINT_TRIGGER: int -SQLITE_CONSTRAINT_UNIQUE: int -SQLITE_CONSTRAINT_VTAB: int -SQLITE_COPY: int -SQLITE_CORRUPT: int -SQLITE_CORRUPT_INDEX: int -SQLITE_CORRUPT_SEQUENCE: int -SQLITE_CORRUPT_VTAB: int -SQLITE_CREATE_INDEX: int -SQLITE_CREATE_TABLE: int -SQLITE_CREATE_TEMP_INDEX: int -SQLITE_CREATE_TEMP_TABLE: int -SQLITE_CREATE_TEMP_TRIGGER: int -SQLITE_CREATE_TEMP_VIEW: int -SQLITE_CREATE_TRIGGER: int -SQLITE_CREATE_VIEW: int -SQLITE_CREATE_VTABLE: int -SQLITE_DBCONFIG_DEFENSIVE: int -SQLITE_DBCONFIG_DQS_DDL: int -SQLITE_DBCONFIG_DQS_DML: int -SQLITE_DBCONFIG_ENABLE_FKEY: int -SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER: int -SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION: int -SQLITE_DBCONFIG_ENABLE_QPSG: int -SQLITE_DBCONFIG_ENABLE_TRIGGER: int -SQLITE_DBCONFIG_ENABLE_VIEW: int -SQLITE_DBCONFIG_LEGACY_ALTER_TABLE: int -SQLITE_DBCONFIG_LEGACY_FILE_FORMAT: int -SQLITE_DBCONFIG_LOOKASIDE: int -SQLITE_DBCONFIG_MAINDBNAME: int -SQLITE_DBCONFIG_MAX: int -SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE: int -SQLITE_DBCONFIG_RESET_DATABASE: int -SQLITE_DBCONFIG_TRIGGER_EQP: int -SQLITE_DBCONFIG_TRUSTED_SCHEMA: int -SQLITE_DBCONFIG_WRITABLE_SCHEMA: int -SQLITE_DBSTATUS_CACHE_HIT: int -SQLITE_DBSTATUS_CACHE_MISS: int -SQLITE_DBSTATUS_CACHE_SPILL: int -SQLITE_DBSTATUS_CACHE_USED: int -SQLITE_DBSTATUS_CACHE_USED_SHARED: int -SQLITE_DBSTATUS_CACHE_WRITE: int -SQLITE_DBSTATUS_DEFERRED_FKS: int -SQLITE_DBSTATUS_LOOKASIDE_HIT: int -SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL: int -SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE: int -SQLITE_DBSTATUS_LOOKASIDE_USED: int -SQLITE_DBSTATUS_MAX: int -SQLITE_DBSTATUS_SCHEMA_USED: int -SQLITE_DBSTATUS_STMT_USED: int -SQLITE_DELETE: int -SQLITE_DENY: int -SQLITE_DETACH: int -SQLITE_DONE: int -SQLITE_DROP_INDEX: int -SQLITE_DROP_TABLE: int -SQLITE_DROP_TEMP_INDEX: int -SQLITE_DROP_TEMP_TABLE: int -SQLITE_DROP_TEMP_TRIGGER: int -SQLITE_DROP_TEMP_VIEW: int -SQLITE_DROP_TRIGGER: int -SQLITE_DROP_VIEW: int -SQLITE_DROP_VTABLE: int -SQLITE_EMPTY: int -SQLITE_ERROR: int -SQLITE_ERROR_MISSING_COLLSEQ: int -SQLITE_ERROR_RETRY: int -SQLITE_ERROR_SNAPSHOT: int -SQLITE_FAIL: int -SQLITE_FCNTL_BEGIN_ATOMIC_WRITE: int -SQLITE_FCNTL_BUSYHANDLER: int -SQLITE_FCNTL_CHUNK_SIZE: int -SQLITE_FCNTL_CKPT_DONE: int -SQLITE_FCNTL_CKPT_START: int -SQLITE_FCNTL_CKSM_FILE: int -SQLITE_FCNTL_COMMIT_ATOMIC_WRITE: int -SQLITE_FCNTL_COMMIT_PHASETWO: int -SQLITE_FCNTL_DATA_VERSION: int -SQLITE_FCNTL_EXTERNAL_READER: int -SQLITE_FCNTL_FILE_POINTER: int -SQLITE_FCNTL_GET_LOCKPROXYFILE: int -SQLITE_FCNTL_HAS_MOVED: int -SQLITE_FCNTL_JOURNAL_POINTER: int -SQLITE_FCNTL_LAST_ERRNO: int -SQLITE_FCNTL_LOCKSTATE: int -SQLITE_FCNTL_LOCK_TIMEOUT: int -SQLITE_FCNTL_MMAP_SIZE: int -SQLITE_FCNTL_OVERWRITE: int -SQLITE_FCNTL_PDB: int -SQLITE_FCNTL_PERSIST_WAL: int -SQLITE_FCNTL_POWERSAFE_OVERWRITE: int -SQLITE_FCNTL_PRAGMA: int -SQLITE_FCNTL_RBU: int -SQLITE_FCNTL_RESERVE_BYTES: int -SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE: int -SQLITE_FCNTL_SET_LOCKPROXYFILE: int -SQLITE_FCNTL_SIZE_HINT: int -SQLITE_FCNTL_SIZE_LIMIT: int -SQLITE_FCNTL_SYNC: int -SQLITE_FCNTL_SYNC_OMITTED: int -SQLITE_FCNTL_TEMPFILENAME: int -SQLITE_FCNTL_TRACE: int -SQLITE_FCNTL_VFSNAME: int -SQLITE_FCNTL_VFS_POINTER: int -SQLITE_FCNTL_WAL_BLOCK: int 
-SQLITE_FCNTL_WIN32_AV_RETRY: int -SQLITE_FCNTL_WIN32_GET_HANDLE: int -SQLITE_FCNTL_WIN32_SET_HANDLE: int -SQLITE_FCNTL_ZIPVFS: int -SQLITE_FORMAT: int -SQLITE_FULL: int -SQLITE_FUNCTION: int -SQLITE_IGNORE: int -SQLITE_INDEX_CONSTRAINT_EQ: int -SQLITE_INDEX_CONSTRAINT_FUNCTION: int -SQLITE_INDEX_CONSTRAINT_GE: int -SQLITE_INDEX_CONSTRAINT_GLOB: int -SQLITE_INDEX_CONSTRAINT_GT: int -SQLITE_INDEX_CONSTRAINT_IS: int -SQLITE_INDEX_CONSTRAINT_ISNOT: int -SQLITE_INDEX_CONSTRAINT_ISNOTNULL: int -SQLITE_INDEX_CONSTRAINT_ISNULL: int -SQLITE_INDEX_CONSTRAINT_LE: int -SQLITE_INDEX_CONSTRAINT_LIKE: int -SQLITE_INDEX_CONSTRAINT_LIMIT: int -SQLITE_INDEX_CONSTRAINT_LT: int -SQLITE_INDEX_CONSTRAINT_MATCH: int -SQLITE_INDEX_CONSTRAINT_NE: int -SQLITE_INDEX_CONSTRAINT_OFFSET: int -SQLITE_INDEX_CONSTRAINT_REGEXP: int -SQLITE_INDEX_SCAN_UNIQUE: int -SQLITE_INSERT: int -SQLITE_INTERNAL: int -SQLITE_INTERRUPT: int -SQLITE_IOCAP_ATOMIC: int -SQLITE_IOCAP_ATOMIC16K: int -SQLITE_IOCAP_ATOMIC1K: int -SQLITE_IOCAP_ATOMIC2K: int -SQLITE_IOCAP_ATOMIC32K: int -SQLITE_IOCAP_ATOMIC4K: int -SQLITE_IOCAP_ATOMIC512: int -SQLITE_IOCAP_ATOMIC64K: int -SQLITE_IOCAP_ATOMIC8K: int -SQLITE_IOCAP_BATCH_ATOMIC: int -SQLITE_IOCAP_IMMUTABLE: int -SQLITE_IOCAP_POWERSAFE_OVERWRITE: int -SQLITE_IOCAP_SAFE_APPEND: int -SQLITE_IOCAP_SEQUENTIAL: int -SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN: int -SQLITE_IOERR: int -SQLITE_IOERR_ACCESS: int -SQLITE_IOERR_AUTH: int -SQLITE_IOERR_BEGIN_ATOMIC: int -SQLITE_IOERR_BLOCKED: int -SQLITE_IOERR_CHECKRESERVEDLOCK: int -SQLITE_IOERR_CLOSE: int -SQLITE_IOERR_COMMIT_ATOMIC: int -SQLITE_IOERR_CONVPATH: int -SQLITE_IOERR_CORRUPTFS: int -SQLITE_IOERR_DATA: int -SQLITE_IOERR_DELETE: int -SQLITE_IOERR_DELETE_NOENT: int -SQLITE_IOERR_DIR_CLOSE: int -SQLITE_IOERR_DIR_FSYNC: int -SQLITE_IOERR_FSTAT: int -SQLITE_IOERR_FSYNC: int -SQLITE_IOERR_GETTEMPPATH: int -SQLITE_IOERR_LOCK: int -SQLITE_IOERR_MMAP: int -SQLITE_IOERR_NOMEM: int -SQLITE_IOERR_RDLOCK: int -SQLITE_IOERR_READ: int -SQLITE_IOERR_ROLLBACK_ATOMIC: int -SQLITE_IOERR_SEEK: int -SQLITE_IOERR_SHMLOCK: int -SQLITE_IOERR_SHMMAP: int -SQLITE_IOERR_SHMOPEN: int -SQLITE_IOERR_SHMSIZE: int -SQLITE_IOERR_SHORT_READ: int -SQLITE_IOERR_TRUNCATE: int -SQLITE_IOERR_UNLOCK: int -SQLITE_IOERR_VNODE: int -SQLITE_IOERR_WRITE: int -SQLITE_LIMIT_ATTACHED: int -SQLITE_LIMIT_COLUMN: int -SQLITE_LIMIT_COMPOUND_SELECT: int -SQLITE_LIMIT_EXPR_DEPTH: int -SQLITE_LIMIT_FUNCTION_ARG: int -SQLITE_LIMIT_LENGTH: int -SQLITE_LIMIT_LIKE_PATTERN_LENGTH: int -SQLITE_LIMIT_SQL_LENGTH: int -SQLITE_LIMIT_TRIGGER_DEPTH: int -SQLITE_LIMIT_VARIABLE_NUMBER: int -SQLITE_LIMIT_VDBE_OP: int -SQLITE_LIMIT_WORKER_THREADS: int -SQLITE_LOCKED: int -SQLITE_LOCKED_SHAREDCACHE: int -SQLITE_LOCKED_VTAB: int -SQLITE_LOCK_EXCLUSIVE: int -SQLITE_LOCK_NONE: int -SQLITE_LOCK_PENDING: int -SQLITE_LOCK_RESERVED: int -SQLITE_LOCK_SHARED: int -SQLITE_MISMATCH: int -SQLITE_MISUSE: int -SQLITE_NOLFS: int -SQLITE_NOMEM: int -SQLITE_NOTADB: int -SQLITE_NOTFOUND: int -SQLITE_NOTICE: int -SQLITE_NOTICE_RECOVER_ROLLBACK: int -SQLITE_NOTICE_RECOVER_WAL: int -SQLITE_OK: int -SQLITE_OK_LOAD_PERMANENTLY: int -SQLITE_OK_SYMLINK: int -SQLITE_OPEN_AUTOPROXY: int -SQLITE_OPEN_CREATE: int -SQLITE_OPEN_DELETEONCLOSE: int -SQLITE_OPEN_EXCLUSIVE: int -SQLITE_OPEN_EXRESCODE: int -SQLITE_OPEN_FULLMUTEX: int -SQLITE_OPEN_MAIN_DB: int -SQLITE_OPEN_MAIN_JOURNAL: int -SQLITE_OPEN_MEMORY: int -SQLITE_OPEN_NOFOLLOW: int -SQLITE_OPEN_NOMUTEX: int -SQLITE_OPEN_PRIVATECACHE: int -SQLITE_OPEN_READONLY: int -SQLITE_OPEN_READWRITE: int 
-SQLITE_OPEN_SHAREDCACHE: int -SQLITE_OPEN_SUBJOURNAL: int -SQLITE_OPEN_SUPER_JOURNAL: int -SQLITE_OPEN_TEMP_DB: int -SQLITE_OPEN_TEMP_JOURNAL: int -SQLITE_OPEN_TRANSIENT_DB: int -SQLITE_OPEN_URI: int -SQLITE_OPEN_WAL: int -SQLITE_PERM: int -SQLITE_PRAGMA: int -SQLITE_PROTOCOL: int -SQLITE_RANGE: int -SQLITE_READ: int -SQLITE_READONLY: int -SQLITE_READONLY_CANTINIT: int -SQLITE_READONLY_CANTLOCK: int -SQLITE_READONLY_DBMOVED: int -SQLITE_READONLY_DIRECTORY: int -SQLITE_READONLY_RECOVERY: int -SQLITE_READONLY_ROLLBACK: int -SQLITE_RECURSIVE: int -SQLITE_REINDEX: int -SQLITE_REPLACE: int -SQLITE_ROLLBACK: int -SQLITE_ROW: int -SQLITE_SAVEPOINT: int -SQLITE_SCHEMA: int -SQLITE_SELECT: int -SQLITE_SHM_EXCLUSIVE: int -SQLITE_SHM_LOCK: int -SQLITE_SHM_SHARED: int -SQLITE_SHM_UNLOCK: int -SQLITE_STATUS_MALLOC_COUNT: int -SQLITE_STATUS_MALLOC_SIZE: int -SQLITE_STATUS_MEMORY_USED: int -SQLITE_STATUS_PAGECACHE_OVERFLOW: int -SQLITE_STATUS_PAGECACHE_SIZE: int -SQLITE_STATUS_PAGECACHE_USED: int -SQLITE_STATUS_PARSER_STACK: int -SQLITE_STATUS_SCRATCH_OVERFLOW: int -SQLITE_STATUS_SCRATCH_SIZE: int -SQLITE_STATUS_SCRATCH_USED: int -SQLITE_SYNC_DATAONLY: int -SQLITE_SYNC_FULL: int -SQLITE_SYNC_NORMAL: int -SQLITE_TOOBIG: int -SQLITE_TRANSACTION: int -SQLITE_TXN_NONE: int -SQLITE_TXN_READ: int -SQLITE_TXN_WRITE: int -SQLITE_UPDATE: int -SQLITE_VTAB_CONSTRAINT_SUPPORT: int -SQLITE_VTAB_DIRECTONLY: int -SQLITE_VTAB_INNOCUOUS: int -SQLITE_WARNING: int -SQLITE_WARNING_AUTOINDEX: int +mapping_access: Dict[Union[str,int],Union[int,str]] +"""Flags for the xAccess VFS method mapping names to int and int to names. +Doc at https://sqlite.org/c3ref/c_access_exists.html +SQLITE_ACCESS_EXISTS SQLITE_ACCESS_READ SQLITE_ACCESS_READWRITE""" -mapping_access: Dict[Union[str,int],Union[int,str]] mapping_authorizer_function: Dict[Union[str,int],Union[int,str]] +"""Authorizer Action Codes mapping names to int and int to names. +Doc at https://sqlite.org/c3ref/c_alter_table.html + +SQLITE_ALTER_TABLE SQLITE_ANALYZE SQLITE_ATTACH SQLITE_COPY +SQLITE_CREATE_INDEX SQLITE_CREATE_TABLE SQLITE_CREATE_TEMP_INDEX +SQLITE_CREATE_TEMP_TABLE SQLITE_CREATE_TEMP_TRIGGER +SQLITE_CREATE_TEMP_VIEW SQLITE_CREATE_TRIGGER SQLITE_CREATE_VIEW +SQLITE_CREATE_VTABLE SQLITE_DELETE SQLITE_DETACH SQLITE_DROP_INDEX +SQLITE_DROP_TABLE SQLITE_DROP_TEMP_INDEX SQLITE_DROP_TEMP_TABLE +SQLITE_DROP_TEMP_TRIGGER SQLITE_DROP_TEMP_VIEW SQLITE_DROP_TRIGGER +SQLITE_DROP_VIEW SQLITE_DROP_VTABLE SQLITE_FUNCTION SQLITE_INSERT +SQLITE_PRAGMA SQLITE_READ SQLITE_RECURSIVE SQLITE_REINDEX +SQLITE_SAVEPOINT SQLITE_SELECT SQLITE_TRANSACTION SQLITE_UPDATE""" + mapping_authorizer_return: Dict[Union[str,int],Union[int,str]] +"""Authorizer Return Codes mapping names to int and int to names. +Doc at https://sqlite.org/c3ref/c_deny.html + +SQLITE_DENY SQLITE_IGNORE""" + mapping_bestindex_constraints: Dict[Union[str,int],Union[int,str]] +"""Virtual Table Constraint Operator Codes mapping names to int and int to names. 
+Doc at https://sqlite.org/c3ref/c_index_constraint_eq.html + +SQLITE_INDEX_CONSTRAINT_EQ SQLITE_INDEX_CONSTRAINT_FUNCTION +SQLITE_INDEX_CONSTRAINT_GE SQLITE_INDEX_CONSTRAINT_GLOB +SQLITE_INDEX_CONSTRAINT_GT SQLITE_INDEX_CONSTRAINT_IS +SQLITE_INDEX_CONSTRAINT_ISNOT SQLITE_INDEX_CONSTRAINT_ISNOTNULL +SQLITE_INDEX_CONSTRAINT_ISNULL SQLITE_INDEX_CONSTRAINT_LE +SQLITE_INDEX_CONSTRAINT_LIKE SQLITE_INDEX_CONSTRAINT_LIMIT +SQLITE_INDEX_CONSTRAINT_LT SQLITE_INDEX_CONSTRAINT_MATCH +SQLITE_INDEX_CONSTRAINT_NE SQLITE_INDEX_CONSTRAINT_OFFSET +SQLITE_INDEX_CONSTRAINT_REGEXP""" + mapping_config: Dict[Union[str,int],Union[int,str]] +"""Configuration Options mapping names to int and int to names. +Doc at https://sqlite.org/c3ref/c_config_covering_index_scan.html + +SQLITE_CONFIG_COVERING_INDEX_SCAN SQLITE_CONFIG_GETMALLOC +SQLITE_CONFIG_GETMUTEX SQLITE_CONFIG_GETPCACHE +SQLITE_CONFIG_GETPCACHE2 SQLITE_CONFIG_HEAP SQLITE_CONFIG_LOG +SQLITE_CONFIG_LOOKASIDE SQLITE_CONFIG_MALLOC +SQLITE_CONFIG_MEMDB_MAXSIZE SQLITE_CONFIG_MEMSTATUS +SQLITE_CONFIG_MMAP_SIZE SQLITE_CONFIG_MULTITHREAD SQLITE_CONFIG_MUTEX +SQLITE_CONFIG_PAGECACHE SQLITE_CONFIG_PCACHE SQLITE_CONFIG_PCACHE2 +SQLITE_CONFIG_PCACHE_HDRSZ SQLITE_CONFIG_PMASZ SQLITE_CONFIG_SCRATCH +SQLITE_CONFIG_SERIALIZED SQLITE_CONFIG_SINGLETHREAD +SQLITE_CONFIG_SMALL_MALLOC SQLITE_CONFIG_SORTERREF_SIZE +SQLITE_CONFIG_SQLLOG SQLITE_CONFIG_STMTJRNL_SPILL SQLITE_CONFIG_URI +SQLITE_CONFIG_WIN32_HEAPSIZE""" + mapping_conflict_resolution_modes: Dict[Union[str,int],Union[int,str]] +"""Conflict resolution modes mapping names to int and int to names. +Doc at https://sqlite.org/c3ref/c_fail.html + +SQLITE_FAIL SQLITE_REPLACE SQLITE_ROLLBACK""" + mapping_db_config: Dict[Union[str,int],Union[int,str]] +"""Database Connection Configuration Options mapping names to int and int to names. +Doc at https://sqlite.org/c3ref/c_dbconfig_defensive.html + +SQLITE_DBCONFIG_DEFENSIVE SQLITE_DBCONFIG_DQS_DDL +SQLITE_DBCONFIG_DQS_DML SQLITE_DBCONFIG_ENABLE_FKEY +SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER +SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION SQLITE_DBCONFIG_ENABLE_QPSG +SQLITE_DBCONFIG_ENABLE_TRIGGER SQLITE_DBCONFIG_ENABLE_VIEW +SQLITE_DBCONFIG_LEGACY_ALTER_TABLE SQLITE_DBCONFIG_LEGACY_FILE_FORMAT +SQLITE_DBCONFIG_LOOKASIDE SQLITE_DBCONFIG_MAINDBNAME +SQLITE_DBCONFIG_MAX SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE +SQLITE_DBCONFIG_RESET_DATABASE SQLITE_DBCONFIG_TRIGGER_EQP +SQLITE_DBCONFIG_TRUSTED_SCHEMA SQLITE_DBCONFIG_WRITABLE_SCHEMA""" + mapping_db_status: Dict[Union[str,int],Union[int,str]] +"""Status Parameters for database connections mapping names to int and int to names. +Doc at https://sqlite.org/c3ref/c_dbstatus_options.html + +SQLITE_DBSTATUS_CACHE_HIT SQLITE_DBSTATUS_CACHE_MISS +SQLITE_DBSTATUS_CACHE_SPILL SQLITE_DBSTATUS_CACHE_USED +SQLITE_DBSTATUS_CACHE_USED_SHARED SQLITE_DBSTATUS_CACHE_WRITE +SQLITE_DBSTATUS_DEFERRED_FKS SQLITE_DBSTATUS_LOOKASIDE_HIT +SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL +SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE SQLITE_DBSTATUS_LOOKASIDE_USED +SQLITE_DBSTATUS_MAX SQLITE_DBSTATUS_SCHEMA_USED +SQLITE_DBSTATUS_STMT_USED""" + mapping_device_characteristics: Dict[Union[str,int],Union[int,str]] +"""Device Characteristics mapping names to int and int to names. 
+Doc at https://sqlite.org/c3ref/c_iocap_atomic.html + +SQLITE_IOCAP_ATOMIC SQLITE_IOCAP_ATOMIC16K SQLITE_IOCAP_ATOMIC1K +SQLITE_IOCAP_ATOMIC2K SQLITE_IOCAP_ATOMIC32K SQLITE_IOCAP_ATOMIC4K +SQLITE_IOCAP_ATOMIC512 SQLITE_IOCAP_ATOMIC64K SQLITE_IOCAP_ATOMIC8K +SQLITE_IOCAP_BATCH_ATOMIC SQLITE_IOCAP_IMMUTABLE +SQLITE_IOCAP_POWERSAFE_OVERWRITE SQLITE_IOCAP_SAFE_APPEND +SQLITE_IOCAP_SEQUENTIAL SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN""" + mapping_extended_result_codes: Dict[Union[str,int],Union[int,str]] +"""Extended Result Codes mapping names to int and int to names. +Doc at https://sqlite.org/rescode.html + +SQLITE_ABORT_ROLLBACK SQLITE_AUTH_USER SQLITE_BUSY_RECOVERY +SQLITE_BUSY_SNAPSHOT SQLITE_BUSY_TIMEOUT SQLITE_CANTOPEN_CONVPATH +SQLITE_CANTOPEN_DIRTYWAL SQLITE_CANTOPEN_FULLPATH +SQLITE_CANTOPEN_ISDIR SQLITE_CANTOPEN_NOTEMPDIR +SQLITE_CANTOPEN_SYMLINK SQLITE_CONSTRAINT_CHECK +SQLITE_CONSTRAINT_COMMITHOOK SQLITE_CONSTRAINT_DATATYPE +SQLITE_CONSTRAINT_FOREIGNKEY SQLITE_CONSTRAINT_FUNCTION +SQLITE_CONSTRAINT_NOTNULL SQLITE_CONSTRAINT_PINNED +SQLITE_CONSTRAINT_PRIMARYKEY SQLITE_CONSTRAINT_ROWID +SQLITE_CONSTRAINT_TRIGGER SQLITE_CONSTRAINT_UNIQUE +SQLITE_CONSTRAINT_VTAB SQLITE_CORRUPT_INDEX SQLITE_CORRUPT_SEQUENCE +SQLITE_CORRUPT_VTAB SQLITE_ERROR_MISSING_COLLSEQ SQLITE_ERROR_RETRY +SQLITE_ERROR_SNAPSHOT SQLITE_IOERR_ACCESS SQLITE_IOERR_AUTH +SQLITE_IOERR_BEGIN_ATOMIC SQLITE_IOERR_BLOCKED +SQLITE_IOERR_CHECKRESERVEDLOCK SQLITE_IOERR_CLOSE +SQLITE_IOERR_COMMIT_ATOMIC SQLITE_IOERR_CONVPATH +SQLITE_IOERR_CORRUPTFS SQLITE_IOERR_DATA SQLITE_IOERR_DELETE +SQLITE_IOERR_DELETE_NOENT SQLITE_IOERR_DIR_CLOSE +SQLITE_IOERR_DIR_FSYNC SQLITE_IOERR_FSTAT SQLITE_IOERR_FSYNC +SQLITE_IOERR_GETTEMPPATH SQLITE_IOERR_LOCK SQLITE_IOERR_MMAP +SQLITE_IOERR_NOMEM SQLITE_IOERR_RDLOCK SQLITE_IOERR_READ +SQLITE_IOERR_ROLLBACK_ATOMIC SQLITE_IOERR_SEEK SQLITE_IOERR_SHMLOCK +SQLITE_IOERR_SHMMAP SQLITE_IOERR_SHMOPEN SQLITE_IOERR_SHMSIZE +SQLITE_IOERR_SHORT_READ SQLITE_IOERR_TRUNCATE SQLITE_IOERR_UNLOCK +SQLITE_IOERR_VNODE SQLITE_IOERR_WRITE SQLITE_LOCKED_SHAREDCACHE +SQLITE_LOCKED_VTAB SQLITE_NOTICE_RECOVER_ROLLBACK +SQLITE_NOTICE_RECOVER_WAL SQLITE_OK_LOAD_PERMANENTLY SQLITE_OK_SYMLINK +SQLITE_READONLY_CANTINIT SQLITE_READONLY_CANTLOCK +SQLITE_READONLY_DBMOVED SQLITE_READONLY_DIRECTORY +SQLITE_READONLY_RECOVERY SQLITE_READONLY_ROLLBACK +SQLITE_WARNING_AUTOINDEX""" + mapping_file_control: Dict[Union[str,int],Union[int,str]] +"""Standard File Control Opcodes mapping names to int and int to names. 
+Doc at https://sqlite.org/c3ref/c_fcntl_begin_atomic_write.html + +SQLITE_FCNTL_BEGIN_ATOMIC_WRITE SQLITE_FCNTL_BUSYHANDLER +SQLITE_FCNTL_CHUNK_SIZE SQLITE_FCNTL_CKPT_DONE SQLITE_FCNTL_CKPT_START +SQLITE_FCNTL_CKSM_FILE SQLITE_FCNTL_COMMIT_ATOMIC_WRITE +SQLITE_FCNTL_COMMIT_PHASETWO SQLITE_FCNTL_DATA_VERSION +SQLITE_FCNTL_EXTERNAL_READER SQLITE_FCNTL_FILE_POINTER +SQLITE_FCNTL_GET_LOCKPROXYFILE SQLITE_FCNTL_HAS_MOVED +SQLITE_FCNTL_JOURNAL_POINTER SQLITE_FCNTL_LAST_ERRNO +SQLITE_FCNTL_LOCKSTATE SQLITE_FCNTL_LOCK_TIMEOUT +SQLITE_FCNTL_MMAP_SIZE SQLITE_FCNTL_OVERWRITE SQLITE_FCNTL_PDB +SQLITE_FCNTL_PERSIST_WAL SQLITE_FCNTL_POWERSAFE_OVERWRITE +SQLITE_FCNTL_PRAGMA SQLITE_FCNTL_RBU SQLITE_FCNTL_RESERVE_BYTES +SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE SQLITE_FCNTL_SET_LOCKPROXYFILE +SQLITE_FCNTL_SIZE_HINT SQLITE_FCNTL_SIZE_LIMIT SQLITE_FCNTL_SYNC +SQLITE_FCNTL_SYNC_OMITTED SQLITE_FCNTL_TEMPFILENAME SQLITE_FCNTL_TRACE +SQLITE_FCNTL_VFSNAME SQLITE_FCNTL_VFS_POINTER SQLITE_FCNTL_WAL_BLOCK +SQLITE_FCNTL_WIN32_AV_RETRY SQLITE_FCNTL_WIN32_GET_HANDLE +SQLITE_FCNTL_WIN32_SET_HANDLE SQLITE_FCNTL_ZIPVFS""" + mapping_limits: Dict[Union[str,int],Union[int,str]] +"""Run-Time Limit Categories mapping names to int and int to names. +Doc at https://sqlite.org/c3ref/c_limit_attached.html + +SQLITE_LIMIT_ATTACHED SQLITE_LIMIT_COLUMN SQLITE_LIMIT_COMPOUND_SELECT +SQLITE_LIMIT_EXPR_DEPTH SQLITE_LIMIT_FUNCTION_ARG SQLITE_LIMIT_LENGTH +SQLITE_LIMIT_LIKE_PATTERN_LENGTH SQLITE_LIMIT_SQL_LENGTH +SQLITE_LIMIT_TRIGGER_DEPTH SQLITE_LIMIT_VARIABLE_NUMBER +SQLITE_LIMIT_VDBE_OP SQLITE_LIMIT_WORKER_THREADS""" + mapping_locking_level: Dict[Union[str,int],Union[int,str]] +"""File Locking Levels mapping names to int and int to names. +Doc at https://sqlite.org/c3ref/c_lock_exclusive.html + +SQLITE_LOCK_EXCLUSIVE SQLITE_LOCK_NONE SQLITE_LOCK_PENDING +SQLITE_LOCK_RESERVED SQLITE_LOCK_SHARED""" + mapping_open_flags: Dict[Union[str,int],Union[int,str]] +"""Flags For File Open Operations mapping names to int and int to names. +Doc at https://sqlite.org/c3ref/c_open_autoproxy.html + +SQLITE_OPEN_AUTOPROXY SQLITE_OPEN_CREATE SQLITE_OPEN_DELETEONCLOSE +SQLITE_OPEN_EXCLUSIVE SQLITE_OPEN_EXRESCODE SQLITE_OPEN_FULLMUTEX +SQLITE_OPEN_MAIN_DB SQLITE_OPEN_MAIN_JOURNAL SQLITE_OPEN_MEMORY +SQLITE_OPEN_NOFOLLOW SQLITE_OPEN_NOMUTEX SQLITE_OPEN_PRIVATECACHE +SQLITE_OPEN_READONLY SQLITE_OPEN_READWRITE SQLITE_OPEN_SHAREDCACHE +SQLITE_OPEN_SUBJOURNAL SQLITE_OPEN_SUPER_JOURNAL SQLITE_OPEN_TEMP_DB +SQLITE_OPEN_TEMP_JOURNAL SQLITE_OPEN_TRANSIENT_DB SQLITE_OPEN_URI +SQLITE_OPEN_WAL""" + +mapping_prepare_flags: Dict[Union[str,int],Union[int,str]] +"""Prepare Flags mapping names to int and int to names. +Doc at https://sqlite.org/c3ref/c_prepare_normalize.html + +SQLITE_PREPARE_NORMALIZE SQLITE_PREPARE_NO_VTAB +SQLITE_PREPARE_PERSISTENT""" + mapping_result_codes: Dict[Union[str,int],Union[int,str]] +"""Result Codes mapping names to int and int to names. +Doc at https://sqlite.org/rescode.html + +SQLITE_ABORT SQLITE_AUTH SQLITE_BUSY SQLITE_CANTOPEN SQLITE_CONSTRAINT +SQLITE_CORRUPT SQLITE_DONE SQLITE_EMPTY SQLITE_ERROR SQLITE_FORMAT +SQLITE_FULL SQLITE_INTERNAL SQLITE_INTERRUPT SQLITE_IOERR +SQLITE_LOCKED SQLITE_MISMATCH SQLITE_MISUSE SQLITE_NOLFS SQLITE_NOMEM +SQLITE_NOTADB SQLITE_NOTFOUND SQLITE_NOTICE SQLITE_OK SQLITE_PERM +SQLITE_PROTOCOL SQLITE_RANGE SQLITE_READONLY SQLITE_ROW SQLITE_SCHEMA +SQLITE_TOOBIG SQLITE_WARNING""" + mapping_status: Dict[Union[str,int],Union[int,str]] +"""Status Parameters mapping names to int and int to names. 
+Doc at https://sqlite.org/c3ref/c_status_malloc_count.html
+
+SQLITE_STATUS_MALLOC_COUNT SQLITE_STATUS_MALLOC_SIZE
+SQLITE_STATUS_MEMORY_USED SQLITE_STATUS_PAGECACHE_OVERFLOW
+SQLITE_STATUS_PAGECACHE_SIZE SQLITE_STATUS_PAGECACHE_USED
+SQLITE_STATUS_PARSER_STACK SQLITE_STATUS_SCRATCH_OVERFLOW
+SQLITE_STATUS_SCRATCH_SIZE SQLITE_STATUS_SCRATCH_USED"""
+
 mapping_sync: Dict[Union[str,int],Union[int,str]]
+"""Synchronization Type Flags mapping names to int and int to names.
+Doc at https://sqlite.org/c3ref/c_sync_dataonly.html
+
+SQLITE_SYNC_DATAONLY SQLITE_SYNC_FULL SQLITE_SYNC_NORMAL"""
+
 mapping_txn_state: Dict[Union[str,int],Union[int,str]]
+"""Allowed return values from [sqlite3_txn_state()] mapping names to int and int to names.
+Doc at https://sqlite.org/c3ref/c_txn_none.html
+
+SQLITE_TXN_NONE SQLITE_TXN_READ SQLITE_TXN_WRITE"""
+
 mapping_virtual_table_configuration_options: Dict[Union[str,int],Union[int,str]]
+"""Virtual Table Configuration Options mapping names to int and int to names.
+Doc at https://sqlite.org/c3ref/c_vtab_constraint_support.html
+
+SQLITE_VTAB_CONSTRAINT_SUPPORT SQLITE_VTAB_DIRECTONLY
+SQLITE_VTAB_INNOCUOUS"""
+
 mapping_virtual_table_scan_flags: Dict[Union[str,int],Union[int,str]]
+"""Virtual Table Scan Flags mapping names to int and int to names.
+Doc at https://sqlite.org/c3ref/c_index_scan_unique.html
+
+SQLITE_INDEX_SCAN_UNIQUE"""
+
 mapping_wal_checkpoint: Dict[Union[str,int],Union[int,str]]
+"""Checkpoint Mode Values mapping names to int and int to names.
+Doc at https://sqlite.org/c3ref/c_checkpoint_full.html
+
+SQLITE_CHECKPOINT_FULL SQLITE_CHECKPOINT_PASSIVE
+SQLITE_CHECKPOINT_RESTART SQLITE_CHECKPOINT_TRUNCATE"""
+
 mapping_xshmlock_flags: Dict[Union[str,int],Union[int,str]]
+"""Flags for the xShmLock VFS method mapping names to int and int to names.
+Doc at https://sqlite.org/c3ref/c_shm_exclusive.html
+
+SQLITE_SHM_EXCLUSIVE SQLITE_SHM_LOCK SQLITE_SHM_SHARED
+SQLITE_SHM_UNLOCK"""
+
+
+
+class Error(Exception):
+    """This is the base for APSW exceptions."""
+
+class AbortError(Error):
+    """*SQLITE_ABORT*. Callback routine requested an abort."""
+
+class AuthError(Error):
+    """*SQLITE_AUTH*. :attr:`Authorization ` denied."""
+
+class BindingsError(Error):
+    """There are several causes for this exception. When using tuples, an incorrect number of bindings were supplied::
+
+        cursor.execute("select ?,?,?", (1,2))     # too few bindings
+        cursor.execute("select ?,?,?", (1,2,3,4)) # too many bindings
+
+    Another cause is that you are using named bindings, but not all bindings are named. You should either use entirely the
+    named style or entirely numeric (unnamed) style::
+
+        cursor.execute("select * from foo where x=:name and y=?")
+
+    .. note::
+
+        It is not considered an error to have missing keys in a dictionary. For example this is perfectly valid::
+
+            cursor.execute("insert into foo values($a,:b,$c)", {'a': 1})
+
+        *b* and *c* are not in the dict. For missing keys, None/NULL
+        will be used. This is so you don't have to add lots of spurious
+        values to the supplied dict. If your schema requires every column
+        to have a value, then SQLite will generate an error due to some
+        values being None/NULL, so that case will be caught."""
+
+class BusyError(Error):
+    """*SQLITE_BUSY*. The database file is locked. Use
+    :meth:`Connection.setbusytimeout` to change how long SQLite waits
+    for the database to be unlocked or :meth:`Connection.setbusyhandler`
+    to use your own handler."""
+
+class CantOpenError(Error):
+    """*SQLITE_CANTOPEN*.
Unable to open the database file.""" + +class ConnectionClosedError(Error): + """You have called :meth:`Connection.close` and then continued to use + the :class:`Connection` or associated :class:`cursors `.""" + +class ConnectionNotClosedError(Error): + """This exception is no longer generated. It was required in earlier + releases due to constraints in threading usage with SQLite.""" + +class ConstraintError(Error): + """*SQLITE_CONSTRAINT*. Abort due to `constraint + `_ violation. This + would happen if the schema required a column to be within a specific + range. If you have multiple constraints, you `can't tell + `__ + which one was the cause.""" + +class CorruptError(Error): + """*SQLITE_CORRUPT*. The database disk image appears to be a + SQLite database but the values inside are inconsistent.""" + +class CursorClosedError(Error): + """You have called :meth:`Cursor.close` and then tried to use the cursor.""" + +class EmptyError(Error): + """*SQLITE_EMPTY*. Database is completely empty.""" + +class ExecTraceAbort(Error): + """The :ref:`execution tracer ` returned False so + execution was aborted.""" + +class ExecutionCompleteError(Error): + """A statement is complete but you try to run it more anyway!""" + +class ExtensionLoadingError(Error): + """An error happened loading an `extension + `_.""" + +class ForkingViolationError(Error): + """See :meth:`apsw.fork_checker`.""" + +class FormatError(Error): + """*SQLITE_FORMAT*. (No longer used) `Auxiliary database `_ format error.""" + +class FullError(Error): + """*SQLITE_FULL*. The disk appears to be full.""" + +class IOError(Error): + """*SQLITE_IOERR*. Some kind of disk I/O error occurred. The + :ref:`extended error code ` will give more detail.""" + +class IncompleteExecutionError(Error): + """You have tried to start a new SQL execute call before executing all + the previous ones. See the :ref:`execution model ` + for more details.""" + +class InternalError(Error): + """*SQLITE_INTERNAL*. (No longer used) Internal logic error in SQLite.""" + +class InterruptError(Error): + """*SQLITE_INTERRUPT*. Operation terminated by + `sqlite3_interrupt `_ - + use :meth:`Connection.interrupt`.""" + +class LockedError(Error): + """*SQLITE_LOCKED*. A table in the database is locked.""" + +class MismatchError(Error): + """*SQLITE_MISMATCH*. Data type mismatch. For example a rowid + or integer primary key must be an integer.""" + +class MisuseError(Error): + """*SQLITE_MISUSE*. SQLite library used incorrectly - typically similar to *ValueError* in Python. Examples include not + having enough flags when opening a connection (eg not including a READ or WRITE flag), or out of spec such as registering + a function with more than 127 parameters.""" + +class NoLFSError(Error): + """*SQLITE_NOLFS*. SQLite has attempted to use a feature not + supported by the operating system such as `large file support + `_.""" + +class NoMemError(Error): + """*SQLITE_NOMEM*. A memory allocation failed.""" + +class NotADBError(Error): + """*SQLITE_NOTADB*. File opened that is not a database file. + SQLite has a header on database files to verify they are indeed + SQLite databases.""" + +class NotFoundError(Error): + """*SQLITE_NOTFOUND*. Returned when various internal items were + not found such as requests for non-existent system calls or file + controls.""" + +class PermissionsError(Error): + """*SQLITE_PERM*. Access permission denied by the operating system, or parts of the database are readonly such as a cursor.""" + +class ProtocolError(Error): + """*SQLITE_PROTOCOL*. 
(No longer used) Database lock protocol error.""" + +class RangeError(Error): + """*SQLITE_RANGE*. (Cannot be generated using APSW). 2nd parameter to `sqlite3_bind `_ out of range""" + +class ReadOnlyError(Error): + """*SQLITE_READONLY*. Attempt to write to a readonly database.""" + +class SQLError(Error): + """*SQLITE_ERROR*. This error is documented as a bad SQL query + or missing database, but is also returned for a lot of other + situations. It is the default error code unless there is a more + specific one.""" + +class SchemaChangeError(Error): + """*SQLITE_SCHEMA*. The database schema changed. A + :meth:`prepared statement ` becomes invalid + if the database schema was changed. Behind the scenes SQLite + reprepares the statement. Another or the same :class:`Connection` + may change the schema again before the statement runs. SQLite will + attempt up to 5 times before giving up and returning this error.""" + +class ThreadingViolationError(Error): + """You have used an object concurrently in two threads. For example you + may try to use the same cursor in two different threads at the same + time, or tried to close the same connection in two threads at the + same time. + + You can also get this exception by using a cursor as an argument to + itself (eg as the input data for :meth:`Cursor.executemany`). + Cursors can only be used for one thing at a time.""" + +class TooBigError(Error): + """*SQLITE_TOOBIG*. String or BLOB exceeds size limit. You can + change the limits using :meth:`Connection.limit`.""" + +class VFSFileClosedError(Error): + """The VFS file is closed so the operation cannot be performed.""" +class VFSNotImplementedError(Error): + """A call cannot be made to an inherited :ref:`VFS` method as the VFS + does not implement the method.""" -class Error(Exception): ... -class AbortError(Error): ... -class AuthError(Error): ... -class BindingsError(Error): ... -class BusyError(Error): ... -class CantOpenError(Error): ... -class ConnectionClosedError(Error): ... -class ConnectionNotClosedError(Error): ... -class ConstraintError(Error): ... -class CorruptError(Error): ... -class CursorClosedError(Error): ... -class EmptyError(Error): ... -class ExecTraceAbort(Error): ... -class ExecutionCompleteError(Error): ... -class ExtensionLoadingError(Error): ... -class ForkingViolationError(Error): ... -class FormatError(Error): ... -class FullError(Error): ... -class IOError(Error): ... -class IncompleteExecutionError(Error): ... -class InternalError(Error): ... -class InterruptError(Error): ... -class LockedError(Error): ... -class MismatchError(Error): ... -class MisuseError(Error): ... -class NoLFSError(Error): ... -class NoMemError(Error): ... -class NotADBError(Error): ... -class NotFoundError(Error): ... -class PermissionsError(Error): ... -class ProtocolError(Error): ... -class RangeError(Error): ... -class ReadOnlyError(Error): ... -class SQLError(Error): ... -class SchemaChangeError(Error): ... -class ThreadingViolationError(Error): ... -class TooBigError(Error): ... -class VFSFileClosedError(Error): ... -class VFSNotImplementedError(Error): ... 
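# ----------------------------------------------------------------------
# A minimal sketch (not from the apsw sources) of how the mapping_* dicts
# and the exception hierarchy above work together. The mappings go both
# directions - name to int and int to name - and apsw exceptions carry
# the underlying codes in their result/extendedresult attributes (per the
# broader apsw documentation; treat that as an assumption here).

import apsw

assert apsw.mapping_result_codes["SQLITE_BUSY"] == apsw.SQLITE_BUSY == 5
assert apsw.mapping_result_codes[5] == "SQLITE_BUSY"
assert apsw.mapping_extended_result_codes[516] == "SQLITE_ABORT_ROLLBACK"

con = apsw.Connection(":memory:")
try:
    con.cursor().execute("this is not valid sql")
except apsw.SQLError as exc:
    # Translate the numeric codes back into readable names; fall back to
    # the raw number if the code is not an extended one.
    print(apsw.mapping_result_codes[exc.result],
          apsw.mapping_extended_result_codes.get(exc.extendedresult, exc.extendedresult))
# ----------------------------------------------------------------------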
diff -Nru python-apsw-3.39.2.0/apsw/__main__.py python-apsw-3.40.0.0/apsw/__main__.py --- python-apsw-3.39.2.0/apsw/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ python-apsw-3.40.0.0/apsw/__main__.py 2022-10-18 09:40:42.000000000 +0000 @@ -0,0 +1,5 @@ +#!/usr/bin/env python3 + +import apsw.shell + +apsw.shell.main() \ No newline at end of file diff -Nru python-apsw-3.39.2.0/apsw/shell.py python-apsw-3.40.0.0/apsw/shell.py --- python-apsw-3.39.2.0/apsw/shell.py 1970-01-01 00:00:00.000000000 +0000 +++ python-apsw-3.40.0.0/apsw/shell.py 2022-11-24 09:18:04.000000000 +0000 @@ -0,0 +1,3005 @@ +#!/usr/bin/env python3 + +import sys +import apsw +import shlex +import os +import csv +import re +import textwrap +import time +import codecs +import base64 + +from typing import TextIO + +class Shell: + """Implements a SQLite shell + + :param stdin: Where to read input from (default sys.stdin) + :param stdout: Where to send output (default sys.stdout) + :param stderr: Where to send errors (default sys.stderr) + :param encoding: Default encoding for files opened/created by the + Shell. If you want stdin/out/err to use a particular encoding + then you need to provide them `already configured `__ that way. + :param args: This should be program arguments only (ie if + passing in sys.argv do not include sys.argv[0] which is the + program name). You can also pass in None and then call + :meth:`process_args` if you want to catch any errors + in handling the arguments yourself. + :param db: An existing :class:`~apsw.Connection` you wish to use + + The commands and behaviour are modelled after the `interactive + shell `__ that is part of + SQLite. + + You can inherit from this class to embed in your own code and user + interface. Internally everything is handled as unicode. + Conversions only happen at the point of input or output which you + can override in your own code. + + Errors and diagnostics are only ever sent to error output + (self.stderr) and never to the regular output (self.stdout). This + means using shell output is always easy and consistent. + + Shell commands begin with a dot (eg .help). They are implemented + as a method named after the command (eg command_help). The method + is passed one parameter which is the list of arguments to the + command. + + Output modes are implemented by functions named after the mode (eg + output_column). + + When you request help the help information is automatically + generated from the docstrings for the command and output + functions. + + You should not use a Shell object concurrently from multiple + threads. It is one huge set of state information which would + become inconsistent if used simultaneously, and then give baffling + errors. It is safe to call methods one at a time from different + threads. ie it doesn't care what thread calls methods as long as + you don't call more than one concurrently. + """ + + class Error(Exception): + """Class raised on errors.
The expectation is that the error + will be displayed by the shell as text so there are no + specific subclasses as the distinctions between different + types of errors doesn't matter.""" + pass + + def __init__(self, stdin: TextIO = None, stdout=None, stderr=None, encoding: str = "utf8", args=None, db=None): + """Create instance, set defaults and do argument processing.""" + # The parameter doc has to be in main class doc as sphinx + # ignores any described here + self.exceptions = False + self.history_file = "~/.sqlite_history" + self._db = None + self.dbfilename = None + if db: + self.db = db, db.filename + else: + self.db = None, None + self.prompt = "sqlite> " + self.moreprompt = " ..> " + self.separator = "|" + self.bail = False + self.echo = False + self.timer = False + self.header = False + self.nullvalue = "" + self.output = self.output_list + self._output_table = self._fmt_sql_identifier("table") + self.widths = [] + # do we truncate output in list mode? (explain doesn't, regular does) + self.truncate = True + # a stack of previous outputs. turning on explain saves previous, off restores + self._output_stack = [] + + # other stuff + self.set_encoding(encoding) + if stdin is None: stdin = sys.stdin + if stdout is None: stdout = sys.stdout + if stderr is None: stderr = sys.stderr + self.stdin = stdin + self.stdout = stdout + self._original_stdout = stdout + self.stderr = stderr + # we don't become interactive until the command line args are + # successfully parsed and acted upon + self.interactive = None + # current colouring object + self.command_colour() # set to default + self._using_readline = False + self._input_stack = [] + self.input_line_number = 0 + self.push_input() + self.push_output() + self._input_descriptions = [] + + if args: + try: + self.process_args(args) + except: + if len(self._input_descriptions): + self._input_descriptions.append("Processing command line arguments") + self.handle_exception() + raise + + if self.interactive is None: + self.interactive = getattr(self.stdin, "isatty", False) and self.stdin.isatty() and getattr( + self.stdout, "isatty", False) and self.stdout.isatty() + + def _ensure_db(self): + "The database isn't opened until first use. This function ensures it is now open." + if not self._db: + if not self.dbfilename: + self.dbfilename = ":memory:" + self._db = apsw.Connection(self.dbfilename, + flags=apsw.SQLITE_OPEN_URI | apsw.SQLITE_OPEN_READWRITE + | apsw.SQLITE_OPEN_CREATE) + return self._db + + def _set_db(self, newv): + "Sets the open database (or None) and filename" + (db, dbfilename) = newv + if self._db: + self._db.close(True) + self._db = None + self._db = db + self.dbfilename = dbfilename + + db = property(_ensure_db, _set_db, None, "The current :class:`~apsw.Connection`") + + def process_args(self, args): + """Process command line options specified in args. It is safe to + call this multiple times. We try to be compatible with SQLite shell + argument parsing. + + :param args: A list of string options. Do not include the + program as args[0] + + :returns: A tuple of (databasefilename, initfiles, + sqlncommands). This is provided for informational purposes + only - they have already been acted upon. An example use + is that the SQLite shell does not enter the main interactive + loop if any sql/commands were provided. + + The first non-option is the database file name. Each + remaining non-option is treated as a complete input (ie it + isn't joined with others looking for a trailing semi-colon). 
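+ + For example (a sketch of typical use; the file names are illustrative):: + + shell = Shell(args=None) + # same as: program -init setup.sql mydb.sqlite "select 3;" + dbfile, inits, sqls = shell.process_args(["-init", "setup.sql", "mydb.sqlite", "select 3;"])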
+ + The SQLite shell uses single dash in front of options. We + allow both single and double dashes. When an unrecognized + argument is encountered then + :meth:`process_unknown_args` is called. + """ + # we don't use optparse as we need to use single dashes for + # options - all hand parsed + if not args: + return None, [], [] + + # are options still valid? + options = True + # have we seen the database name? + havedbname = False + # List of init files to read + inits = [] + # List of sql/dot commands + sqls = [] + + while args: + if not options or not args[0].startswith("-"): + options = False + if not havedbname: + # grab new database + self.db = None, args[0] + havedbname = True + else: + sqls.append(args[0]) + args = args[1:] + continue + + # remove initial single or double dash + args[0] = args[0][1:] + if args[0].startswith("-"): + args[0] = args[0][1:] + + if args[0] == "init": + if len(args) < 2: + raise self.Error("You need to specify a filename after -init") + inits.append(args[1]) + args = args[2:] + continue + + if args[0] == "header" or args[0] == "noheader": + self.header = args[0] == "header" + args = args[1:] + continue + + if args[0] in ("echo", "bail", "interactive"): + setattr(self, args[0], True) + args = args[1:] + continue + + if args[0] == "batch": + self.interactive = False + args = args[1:] + continue + + if args[0] in ("separator", "nullvalue", "encoding"): + if len(args) < 2: + raise self.Error("You need to specify a value after -" + args[0]) + getattr(self, "command_" + args[0])([args[1]]) + args = args[2:] + continue + + if args[0] == "version": + self.write(self.stdout, apsw.sqlitelibversion() + "\n") + # A pretty gnarly thing to do + sys.exit(0) + + if args[0] == "help": + self.write(self.stderr, self.usage()) + sys.exit(0) + + if args[0] in ("no-colour", "no-color", "nocolour", "nocolor"): + self.colour_scheme = "off" + self._out_colour() + args = args[1:] + continue + + # only remaining known args are output modes + if getattr(self, "output_" + args[0], None): + self.command_mode(args[:1]) + args = args[1:] + continue + + newargs = self.process_unknown_args(args) + if newargs is None: + raise self.Error("Unrecognized argument '" + args[0] + "'") + args = newargs + + for f in inits: + self.command_read([f]) + + for s in sqls: + self.process_complete_line(s) + + return self.dbfilename, inits, sqls + + def process_unknown_args(self, args): + """This is called when :meth:`process_args` encounters an + argument it doesn't understand. Override this method if you + want to be able to understand additional command line arguments. + + :param args: A list of the remaining arguments. The initial one will + have had the leading dashes removed (eg if it was --foo on the command + line then args[0] will be "foo" + :returns: None if you don't recognize the argument either. Otherwise + return the list of remaining arguments after you have processed + yours. + """ + return None + + def usage(self): + "Returns the usage message. Make sure it is newline terminated" + + msg = """ +Usage: program [OPTIONS] FILENAME [SQL|CMD] [SQL|CMD]... +FILENAME is the name of a SQLite database. A new database is +created if the file does not exist. 
+OPTIONS include: + -init filename read/process named file + -echo print commands before execution + -[no]header turn headers on or off + -bail stop after hitting an error + -interactive force interactive I/O + -batch force batch I/O + -column set output mode to 'column' + -csv set output mode to 'csv' + -html set output mode to 'html' + -line set output mode to 'line' + -list set output mode to 'list' + -python set output mode to 'python' + -separator 'x' set output field separator (|) + -nullvalue 'text' set text string for NULL values + -version show SQLite version + -encoding 'name' the encoding to use for files + opened via .import, .read & .output + -nocolour disables colour output to screen +""" + return msg.lstrip() + + ### + ### Value formatting routines. They take a value and return a + ### text formatting of them. Mostly used by the various outputs + ### but also by random other pieces of code. + ### + + _binary_type = bytes + _basestring = str + + # bytes that are ok in C strings - no need for quoting + _printable = [ + ord(x) for x in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789~!@#$%^&*()`_-+={}[]:;,.<>/?|" + ] + + def _fmt_c_string(self, v): + "Format as a C string including surrounding double quotes" + if isinstance(v, self._basestring): + op = ['"'] + for c in v: + if c == "\\": + op.append("\\\\") + elif c == "\r": + op.append("\\r") + elif c == "\n": + op.append("\\n") + elif c == "\t": + op.append("\\t") + elif ord(c) not in self._printable: + op.append("\\" + c) + else: + op.append(c) + op.append('"') + return "".join(op) + elif v is None: + return '"' + self.nullvalue + '"' + elif isinstance(v, self._binary_type): + o = lambda x: x + fromc = chr + res = ['"'] + for c in v: + if o(c) in self._printable: + res.append(fromc(c)) + else: + res.append("\\x%02X" % (o(c), )) + res.append('"') + return "".join(res) + else: + # number of some kind + return '"%s"' % (v, ) + + def _fmt_html_col(self, v): + "Format as HTML (mainly escaping &/</>)" + return self._fmt_text_col(v).\ + replace("&", "&amp;"). \ + replace(">", "&gt;"). \ + replace("<", "&lt;"). \ + replace("'", "&apos;"). \ + replace('"', "&quot;") + + def _fmt_json_value(self, v): + "Format a value." + if isinstance(v, self._basestring): + # we assume utf8 so only some characters need to be escaped + op = ['"'] + for c in v: + if c == "\\": + op.append("\\\\") + elif c == "\r": + op.append("\\r") + elif c == "\n": + op.append("\\n") + elif c == "\t": + op.append("\\t") + elif c == "/": # yes you have to escape forward slash for some reason + op.append("\\/") + elif c == '"': + op.append("\\" + c) + elif c == "\b": + op.append("\\b") + elif c == "\f": + op.append("\\f") + else: + # It isn't clear when \u sequences *must* be used. + # Assuming not needed due to utf8 output which + # corresponds to what rfc4627 implies.
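+ # (RFC 4627 only *requires* escaping of the quote, the + # backslash and the control characters U+0000 through U+001F; + # everything else may appear literally in UTF-8 output.)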
+ op.append(c) + op.append('"') + return "".join(op) + elif v is None: + return 'null' + elif isinstance(v, self._binary_type): + o = base64.encodebytes(v).decode("ascii") + if o[-1] == "\n": + o = o[:-1] + return '"' + o + '"' + else: + # number of some kind + return '%s' % (v, ) + + def _fmt_python(self, v): + "Format as python literal" + if v is None: + return "None" + elif isinstance(v, self._basestring): + return repr(v) + elif isinstance(v, self._binary_type): + res = ['b"'] + for i in v: + if i in self._printable: + res.append(chr(i)) + else: + res.append("\\x%02X" % (i, )) + res.append('"') + return "".join(res) + else: + return "%s" % (v, ) + + def _fmt_sql_identifier(self, v): + "Return the identifier quoted in SQL syntax if needed (eg table and column names)" + if not len(v): # yes sqlite does allow zero length identifiers + return '""' + nonalnum = re.sub("[A-Za-z_0-9]+", "", v) + if len(nonalnum) == 0: + if v.upper() not in self._sqlite_reserved: + # Ok providing it doesn't start with a digit + if v[0] not in "0123456789": + return v + # double quote it unless there are any double quotes in it + if '"' in nonalnum: + return "[%s]" % (v, ) + return '"%s"' % (v, ) + + def _fmt_text_col(self, v): + "Regular text formatting" + if v is None: + return self.nullvalue + elif isinstance(v, self._basestring): + return v + elif isinstance(v, self._binary_type): + # sqlite gives back raw bytes! + return "<Binary data>" + else: + return "%s" % (v, ) + + ### + ### The various output routines. They are always called with the + ### header irrespective of the setting allowing for some per query + ### setup. (see output_column for example). The doc strings are + ### used to generate help. + ### + + def output_column(self, header, line): + """ + Items left aligned in space padded columns. They are + truncated if they do not fit. If the width hasn't been + specified for a column then 10 is used unless the column name + (header) is longer in which case that width is used. Use the + .width command to change column sizes. + """ + # as an optimization we calculate self._actualwidths which is + # reset for each query + if header: + + def gw(n): + if n < len(self.widths) and self.widths[n] != 0: + return self.widths[n] + # if width is not present or 0 then autosize + text = self._fmt_text_col(line[n]) + return max(len(text), 10) + + widths = [gw(i) for i in range(len(line))] + + if self.truncate: + self._actualwidths = ["%" + ("-%d.%ds", "%d.%ds")[w < 0] % (abs(w), abs(w)) for w in widths] + else: + self._actualwidths = ["%" + ("-%ds", "%ds")[w < 0] % (abs(w), ) for w in widths] + + if self.header: + # output the headers + c = self.colour + cols = [ + c.header + (self._actualwidths[i] % (self._fmt_text_col(line[i]), )) + c.header_ + for i in range(len(line)) + ] + # sqlite shell uses two spaces between columns + self.write(self.stdout, "  ".join(cols) + "\n") + if c is self._colours["off"]: + self.output_column(False, ["-" * abs(widths[i]) for i in range(len(widths))]) + return + cols = [ + self.colour.colour_value(line[i], self._actualwidths[i] % (self._fmt_text_col(line[i]), )) + for i in range(len(line)) + ] + # sqlite shell uses two spaces between columns + self.write(self.stdout, "  ".join(cols) + "\n") + + output_columns = output_column + + def output_csv(self, header, line): + """ + Items in csv format (comma separated). Use tabs mode for tab + separated. You can use the .separator command to use a + different one after switching mode.
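+ + For example (illustrative):: + + .mode csv + .separator ;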
A separator of comma uses + double quotes for quoting while other separators do not do any + quoting. The Python csv library used for this only supports + single character separators. + """ + + # we use self._csv for the work, setup when header is + # supplied. _csv is a tuple of a StringIO and the csv.writer + # instance. + + fixdata = lambda x: x + + if header: + import io + s = io.StringIO() + kwargs = {} + if self.separator == ",": + kwargs["dialect"] = "excel" + elif self.separator == "\t": + kwargs["dialect"] = "excel-tab" + else: + kwargs["quoting"] = csv.QUOTE_NONE + kwargs["delimiter"] = fixdata(self.separator) + kwargs["doublequote"] = False + # csv module is bug ridden junk - I already say no + # quoting so it still looks for the quotechar and then + # gets upset that it can't be quoted. Which bit of no + # quoting was ambiguous? + kwargs["quotechar"] = "\x00" + + writer = csv.writer(s, **kwargs) + self._csv = (s, writer) + if self.header: + self.output_csv(None, line) + return + + if header is None: + c = self.colour + line = [c.header + fixdata(self._fmt_text_col(l)) + c.header_ for l in line] + else: + fmt = lambda x: self.colour.colour_value(x, fixdata(self._fmt_text_col(x))) + line = [fmt(l) for l in line] + self._csv[1].writerow(line) + t = self._csv[0].getvalue() + # csv lib always does DOS eol + assert (t.endswith("\r\n")) + t = t[:-2] + # should not be other eol irregularities + assert (not t.endswith("\r") and not t.endswith("\n")) + self.write(self.stdout, t + "\n") + self._csv[0].truncate(0) + self._csv[0].seek(0) + + def output_html(self, header, line): + "HTML table style" + if header: + if not self.header: + return + fmt = lambda x: self.colour.header + self._fmt_html_col(x) + self.colour.header_ + else: + fmt = lambda x: self.colour.colour_value(x, self._fmt_html_col(x)) + line = [fmt(l) for l in line] + out = ["<TR>"] + for l in line: + out.append(("<TD>", "<TH>")[header]) + out.append(l) + out.append(("</TD>\n", "</TH>\n")[header]) + out.append("</TR>\n") + self.write(self.stdout, "".join(out)) + + def output_insert(self, header, line): + """ + Lines as SQL insert statements. The table name is "table" + unless you specified a different one as the second parameter + to the .mode command. + """ + if header: + return + fmt = lambda x: self.colour.colour_value(x, apsw.format_sql_value(x)) + out = "INSERT INTO " + self._output_table + " VALUES(" + ",".join([fmt(l) for l in line]) + ");\n" + self.write(self.stdout, out) + + def output_json(self, header, line): + """ + Each line as a JSON object with a trailing comma. Blobs are + output as base64 encoded strings. You should be using UTF8 + output encoding. + """ + if header: + self._output_json_cols = line + return + fmt = lambda x: self.colour.colour_value(x, self._fmt_json_value(x)) + out = ["%s: %s" % (self._fmt_json_value(k), fmt(line[i])) for i, k in enumerate(self._output_json_cols)] + self.write(self.stdout, "{ " + ", ".join(out) + "},\n") + + def output_line(self, header, line): + """ + One value per line in the form 'column = value' with a blank + line between rows.
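+ + For example (illustrative data):: + + title = Dune + year = 1965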
+ """ + if header: + w = 5 + for l in line: + if len(l) > w: + w = len(l) + self._line_info = (w, line) + return + fmt = lambda x: self.colour.colour_value(x, self._fmt_text_col(x)) + w = self._line_info[0] + for i in range(len(line)): + self.write(self.stdout, "%*s = %s\n" % (w, self._line_info[1][i], fmt(line[i]))) + self.write(self.stdout, "\n") + + output_lines = output_line + + def output_list(self, header, line): + "All items on one line with separator" + if header: + if not self.header: + return + c = self.colour + fmt = lambda x: c.header + x + c.header_ + else: + fmt = lambda x: self.colour.colour_value(x, self._fmt_text_col(x)) + self.write(self.stdout, self.separator.join([fmt(x) for x in line]) + "\n") + + def output_python(self, header, line): + "Tuples in Python source form for each row" + if header: + if not self.header: + return + c = self.colour + fmt = lambda x: c.header + self._fmt_python(x) + c.header_ + else: + fmt = lambda x: self.colour.colour_value(x, self._fmt_python(x)) + self.write(self.stdout, '(' + ", ".join([fmt(l) for l in line]) + "),\n") + + def output_tcl(self, header, line): + "Outputs TCL/C style strings using current separator" + # In theory you could paste the output into your source ... + if header: + if not self.header: + return + c = self.colour + fmt = lambda x: c.header + self._fmt_c_string(x) + c.header_ + else: + fmt = lambda x: self.colour.colour_value(x, self._fmt_c_string(x)) + self.write(self.stdout, self.separator.join([fmt(l) for l in line]) + "\n") + + def _output_summary(self, summary): + # internal routine to output a summary line or two + self.write(self.stdout, self.colour.summary + summary + self.colour.summary_) + + ### + ### Various routines + ### + + def cmdloop(self, intro=None): + """Runs the main interactive command loop. + + :param intro: Initial text banner to display instead of the + default. Make sure you newline terminate it. + """ + if intro is None: + intro = """ +SQLite version %s (APSW %s) +Enter ".help" for instructions +Enter SQL statements terminated with a ";" +""" % (apsw.sqlitelibversion(), apsw.apswversion()) + intro = intro.lstrip() + if self.interactive and intro: + c = self.colour + self.write(self.stdout, c.intro + intro + c.intro_) + + using_readline = False + try: + if self.interactive and self.stdin is sys.stdin: + import readline + old_completer = readline.get_completer() + readline.set_completer(self.complete) + readline.parse_and_bind("tab: complete") + using_readline = True + try: + readline.read_history_file(os.path.expanduser(self.history_file)) + except: + # We only expect IOError here but if the history + # file does not exist and this code has been + # compiled into the module it is possible to get + # an IOError that doesn't match the IOError from + # Python parse time resulting in an IOError + # exception being raised. Consequently we just + # catch all exceptions. 
+ pass + except ImportError: + pass + + try: + while True: + self._input_descriptions = [] + if using_readline: + # we drop completion cache because it contains + # table and column names which could have changed + # with last executed SQL + self._completion_cache = None + self._using_readline = True + try: + command = self.getcompleteline() + if command is None: # EOF + return + self.process_complete_line(command) + except: + self._append_input_description() + try: + self.handle_exception() + except UnicodeDecodeError: + self.handle_exception() + finally: + if using_readline: + readline.set_completer(old_completer) + readline.set_history_length(256) + readline.write_history_file(os.path.expanduser(self.history_file)) + + def handle_exception(self): + """Handles the current exception, printing a message to stderr as appropriate. + It will reraise the exception if necessary (eg if bail is true)""" + eclass, eval, etb = sys.exc_info() # py2&3 compatible way of doing this + if isinstance(eval, SystemExit): + eval._handle_exception_saw_this = True + raise + + self._out_colour() + self.write(self.stderr, self.colour.error) + + if isinstance(eval, KeyboardInterrupt): + self.handle_interrupt() + text = "Interrupted" + else: + text = str(eval) + + if not text.endswith("\n"): + text = text + "\n" + + if len(self._input_descriptions): + for i in range(len(self._input_descriptions)): + if i == 0: + pref = "At " + else: + pref = " " * i + "From " + self.write(self.stderr, pref + self._input_descriptions[i] + "\n") + + self.write(self.stderr, text) + if self.exceptions: + stack = [] + while etb: + stack.append(etb.tb_frame) + etb = etb.tb_next + + for frame in stack: + self.write( + self.stderr, + "\nFrame %s in %s at line %d\n" % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno)) + vars = list(frame.f_locals.items()) + vars.sort() + for k, v in vars: + try: + v = repr(v)[:80] + except: + v = "" + self.write(self.stderr, "%10s = %s\n" % (k, v)) + self.write(self.stderr, "\n%s: %s\n" % (eclass, repr(eval))) + + self.write(self.stderr, self.colour.error_) + + eval._handle_exception_saw_this = True + if self.bail: + raise + + def process_sql(self, sql, bindings=None, internal=False, summary=None): + """Processes SQL text consisting of one or more statements + + :param sql: SQL to execute + + :param bindings: bindings for the *sql* + + :param internal: If True then this is an internal execution + (eg the .tables or .database command). When executing + internal sql timings are not shown nor is the SQL echoed. + + :param summary: If not None then should be a tuple of two + items. If the ``sql`` returns any data then the first item + is printed before the first row, and the second item is + printed after the last row. An example usage is the .find + command which shows table names. + """ + cur = self.db.cursor() + # we need to know when each new statement is executed + state = {'newsql': True, 'timing': None} + + def et(cur, sql, bindings): + state['newsql'] = True + # if time reporting, do so now + if not internal and self.timer: + if state['timing']: + self.display_timing(state['timing'], self.get_resource_usage()) + # print statement if echo is on + if not internal and self.echo: + # ? should we strip leading and trailing whitespace? backslash quote stuff? 
+ if bindings: + self.write(self.stderr, "%s [%s]\n" % (sql, bindings)) + else: + self.write(self.stderr, sql + "\n") + # save resource from beginning of command (ie don't include echo time above) + if not internal and self.timer: + state['timing'] = self.get_resource_usage() + return True + + cur.exectrace = et + # processing loop + try: + for row in cur.execute(sql, bindings): + if state['newsql']: + # summary line? + if summary: + self._output_summary(summary[0]) + # output a header always + cols = [h for h, d in cur.getdescription()] + self.output(True, cols) + state['newsql'] = False + self.output(False, row) + if not state['newsql'] and summary: + self._output_summary(summary[1]) + except: + # If echo is on and the sql to execute is a syntax error + # then the exec tracer won't have seen it so it won't be + # printed and the user will be wondering exactly what sql + # had the error. We look in the traceback and deduce if + # the error was happening in a prepare or not. + if not internal and self.echo: + tb = sys.exc_info()[2] + last = None + while tb: + last = tb.tb_frame + tb = tb.tb_next + raise + + if not internal and self.timer: + self.display_timing(state['timing'], self.get_resource_usage()) + + def process_command(self, cmd): + """Processes a dot command. It is split into parts using the + `shlex.split + `__ + function which is roughly the same method used by Unix/POSIX + shells. + """ + if self.echo: + self.write(self.stderr, cmd + "\n") + cmd = shlex.split(cmd) + assert cmd[0][0] == "." + cmd[0] = cmd[0][1:] + fn = getattr(self, "command_" + cmd[0], None) + if not fn: + raise self.Error("Unknown command \"%s\". Enter \".help\" for help" % (cmd[0], )) + res = fn(cmd[1:]) + + ### + ### Commands start here + ### + + def _boolean_command(self, name, cmd): + "Parse and verify boolean parameter" + if len(cmd) != 1 or cmd[0].lower() not in ("on", "off"): + raise self.Error(name + " expected ON or OFF") + return cmd[0].lower() == "on" + + # Note that doc text is used for generating help output. + + def command_backup(self, cmd): + """backup ?DB? FILE: Backup DB (default "main") to FILE + + Copies the contents of the current database to FILE + overwriting whatever was in FILE. If you have attached databases + then you can specify their name instead of the default of "main". + + The backup is done at the page level - SQLite copies the pages + as is. There is no round trip through SQL code. + """ + dbname = "main" + if len(cmd) == 1: + fname = cmd[0] + elif len(cmd) == 2: + dbname = cmd[0] + fname = cmd[1] + else: + raise self.Error("Backup takes one or two parameters") + out = apsw.Connection(fname) + b = out.backup("main", self.db, dbname) + try: + while not b.done: + b.step() + finally: + b.finish() + out.close() + + def command_bail(self, cmd): + """bail ON|OFF: Stop after hitting an error (default OFF) + + If an error is encountered while processing commands or SQL + then exit. (Note this is different than SQLite shell which + only exits for errors in SQL.) + """ + self.bail = self._boolean_command("bail", cmd) + + def command_colour(self, cmd=[]): + """colour SCHEME: Selects a colour scheme + + Residents of both countries that have not adopted the metric + system may also spell this command without a 'u'. If using a + colour terminal in interactive mode then output is + automatically coloured to make it more readable. Use 'off' to + turn off colour, and no name or 'default' for the default. 
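+ + For example:: + + .colour off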
+ """ + if len(cmd) > 1: + raise self.Error("Too many colour schemes") + c = cmd and cmd[0] or "default" + if c not in self._colours: + raise self.Error("No such colour scheme: " + c) + self.colour_scheme = c + self._out_colour() + + command_color = command_colour + + def command_databases(self, cmd): + """databases: Lists names and files of attached databases + + """ + if len(cmd): + raise self.Error("databases command doesn't take any parameters") + self.push_output() + self.header = True + self.output = self.output_column + self.truncate = False + self.widths = [3, 15, 58] + try: + self.process_sql("pragma database_list", internal=True) + finally: + self.pop_output() + + def command_dump(self, cmd): + """dump ?TABLE? [TABLE...]: Dumps all or specified tables in SQL text format + + The table name is treated as like pattern so you can use % as + a wildcard. You can use dump to make a text based backup of + the database. It is also useful for comparing differences or + making the data available to other databases. Indices and + triggers for the table(s) are also dumped. Finally views + matching the table pattern name are dumped (it isn't possible + to work out which views access which table and views can + access multiple tables anyway). + + Note that if you are dumping virtual tables such as used by + the FTS3 module then they may use other tables to store + information. For example if you create a FTS3 table named + *recipes* then it also creates *recipes_content*, + *recipes_segdir* etc. Consequently to dump this example + correctly use:: + + .dump recipes recipes_% + + If the database is empty or no tables/views match then there + is no output. + """ + # Simple tables are easy to dump. More complicated is dealing + # with virtual tables, foreign keys etc. + + # Lock the database while doing the dump so nothing changes + # under our feet + self.process_sql("BEGIN IMMEDIATE", internal=True) + + # Used in comment() - see issue 142 + outputstrtype = str + + # Python 2.3 can end up with nonsense like "en_us" so we fall + # back to ascii in that case + outputstrencoding = getattr(self.stdout, "encoding", "ascii") + try: + codecs.lookup(outputstrencoding) + except: + outputstrencoding = "ascii" + + def unicodify(s): + if not isinstance(s, outputstrtype): + # See issue 142 - it may not be in an expected encoding + return s.decode(outputstrencoding, "replace") + return s + + try: + # first pass -see if virtual tables or foreign keys are in + # use. If they are we emit pragmas to deal with them, but + # prefer not to emit them + v = {"virtuals": False, "foreigns": False} + + def check(name, sql): + if name.lower().startswith("sqlite_"): + return False + sql = sql.lower() + if re.match(r"^\s*create\s+virtual\s+.*", sql): + v["virtuals"] = True + # pragma table_info doesn't tell us if foreign keys + # are involved so we guess if any the various strings are + # in the sql somewhere + if re.match(r".*\b(foreign\s*key|references)\b.*", sql): + v["foreigns"] = True + return True + + if len(cmd) == 0: + cmd = ["%"] + + tables = [] + for pattern in cmd: + for name, sql in self.db.execute( + "SELECT name,sql FROM sqlite_master " + "WHERE sql NOT NULL AND type IN ('table','view') " + "AND tbl_name LIKE ?1", (pattern, )): + if check(name, sql) and name not in tables: + tables.append(name) + + if not tables: + return + + # will we need to analyze anything later? 
+ analyze_needed = [] + for stat in self.db.execute( + "select name from sqlite_master where sql not null and type='table' and tbl_name like 'sqlite_stat%'" + ): + for name in tables: + if len(self.db.execute( + "select * from " + self._fmt_sql_identifier(stat[0]) + " WHERE tbl=?", + (name, )).fetchall()): + if name not in analyze_needed: + analyze_needed.append(name) + analyze_needed.sort() + + def blank(): + self.write(self.stdout, "\n") + + def comment(s): + s = unicodify(s) + self.write(self.stdout, textwrap.fill(s, 78, initial_indent="-- ", subsequent_indent="-- ") + "\n") + + pats = ", ".join([(x, "(All)")[x == "%"] for x in cmd]) + comment("SQLite dump (by APSW %s)" % (apsw.apswversion(), )) + comment("SQLite version " + apsw.sqlitelibversion()) + comment("Date: " + unicodify(time.strftime("%c"))) + comment("Tables like: " + pats) + comment("Database: " + self.db.filename) + try: + import getpass + import socket + comment("User: %s @ %s" % (unicodify(getpass.getuser()), unicodify(socket.gethostname()))) + except ImportError: + pass + blank() + + comment("The values of various per-database settings") + self.write(self.stdout, + "PRAGMA page_size=" + str(self.db.execute("pragma page_size").fetchall()[0][0]) + ";\n") + comment("PRAGMA encoding='" + self.db.execute("pragma encoding").fetchall()[0][0] + "';\n") + vac = {0: "NONE", 1: "FULL", 2: "INCREMENTAL"} + vacvalue = self.db.execute("pragma auto_vacuum").fetchall()[0][0] + comment("PRAGMA auto_vacuum=" + vac.get(vacvalue, str(vacvalue)) + ";\n") + comment("PRAGMA max_page_count=" + str(self.db.execute("pragma max_page_count").fetchall()[0][0]) + + ";\n") + blank() + + # different python versions have different requirements + # about specifying cmp to sort routine so we use this + # portable workaround with a decorated list instead + dectables = [(x.lower(), x) for x in tables] + dectables.sort() + tables = [y for x, y in dectables] + + virtuals = v["virtuals"] + foreigns = v["foreigns"] + + if virtuals: + comment("This pragma is needed to restore virtual tables") + self.write(self.stdout, "PRAGMA writable_schema=ON;\n") + if foreigns: + comment("This pragma turns off checking of foreign keys " + "as tables would be inconsistent while restoring. 
It was introduced " + "in SQLite 3.6.19.") + self.write(self.stdout, "PRAGMA foreign_keys=OFF;\n") + + if virtuals or foreigns: + blank() + + self.write(self.stdout, "BEGIN TRANSACTION;\n") + blank() + + def sqldef(s): + # return formatted sql watching out for embedded + # comments at the end forcing trailing ; onto next + # line https://sqlite.org/src/info/c04a8b8a4f + if "--" in s.split("\n")[-1]: + nl = "\n" + else: + nl = "" + return s + nl + ";\n" + + # do the table dumping loops + oldtable = self._output_table + try: + self.push_output() + self.output = self.output_insert + # Dump the table + for table in tables: + for sql in self.db.execute("SELECT sql FROM sqlite_master WHERE name=?1 AND type='table'", + (table, )): + comment("Table " + table) + # Special treatment for virtual tables - they + # get called back on drops and creates and + # could thwart us so we have to manipulate + # sqlite_master directly + if sql[0].lower().split()[:3] == ["create", "virtual", "table"]: + self.write( + self.stdout, "DELETE FROM sqlite_master WHERE name=" + apsw.format_sql_value(table) + + " AND type='table';\n") + self.write( + self.stdout, + "INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql) VALUES('table',%s,%s,0,%s);\n" + % (apsw.format_sql_value(table), apsw.format_sql_value(table), + apsw.format_sql_value(sql[0]))) + else: + self.write(self.stdout, "DROP TABLE IF EXISTS " + self._fmt_sql_identifier(table) + ";\n") + self.write(self.stdout, sqldef(sql[0])) + self._output_table = self._fmt_sql_identifier(table) + self.process_sql("select * from " + self._fmt_sql_identifier(table), internal=True) + # Now any indices or triggers + first = True + for name, sql in self.db.execute( + "SELECT name,sql FROM sqlite_master " + "WHERE sql NOT NULL AND type IN ('index', 'trigger') " + "AND tbl_name=?1 AND name NOT LIKE 'sqlite_%' " + "ORDER BY lower(name)", (table, )): + if first: + comment("Triggers and indices on " + table) + first = False + self.write(self.stdout, sqldef(sql)) + blank() + # Views done last. They have to be done in the same order as they are in sqlite_master + # as they could refer to each other + first = True + for name, sql in self.db.execute("SELECT name,sql FROM sqlite_master " + "WHERE sql NOT NULL AND type='view' " + "AND name IN ( " + + ",".join([apsw.format_sql_value(i) + for i in tables]) + ") ORDER BY _ROWID_"): + if first: + comment("Views") + first = False + self.write(self.stdout, "DROP VIEW IF EXISTS %s;\n" % (self._fmt_sql_identifier(name), )) + self.write(self.stdout, sqldef(sql)) + if not first: + blank() + + # sqlite sequence + # does it exist + if len(self.db.execute("select * from sqlite_master where name='sqlite_sequence'").fetchall()): + first = True + for t in tables: + v = self.db.execute("select seq from main.sqlite_sequence where name=?1", + (t, )).fetchall() + if len(v): + assert len(v) == 1 + if first: + comment("For primary key autoincrements the next id " + "to use is stored in sqlite_sequence") + first = False + self.write( + self.stdout, + 'DELETE FROM main.sqlite_sequence WHERE name=%s;\n' % (apsw.format_sql_value(t), )) + self.write( + self.stdout, 'INSERT INTO main.sqlite_sequence VALUES (%s, %s);\n' % + (apsw.format_sql_value(t), v[0][0])) + if not first: + blank() + finally: + self.pop_output() + self._output_table = oldtable + + # analyze + if analyze_needed: + comment("You had used the analyze command on these tables before. 
Rerun for this new data.") + for n in analyze_needed: + self.write(self.stdout, "ANALYZE " + self._fmt_sql_identifier(n) + ";\n") + blank() + + # user version pragma + uv = self.db.execute("pragma user_version").fetchall()[0][0] + if uv: + comment( + "Your database may need this. It is sometimes used to keep track of the schema version (eg Firefox does this)." + ) + self.write(self.stdout, "pragma user_version=%d;" % (uv, )) + blank() + + # Save it all + self.write(self.stdout, "COMMIT TRANSACTION;\n") + + # cleanup pragmas + if foreigns: + blank() + comment("Restoring foreign key checking back on. Note that SQLite 3.6.19 is off by default") + self.write(self.stdout, "PRAGMA foreign_keys=ON;\n") + if virtuals: + blank() + comment("Restoring writable schema back to default") + self.write(self.stdout, "PRAGMA writable_schema=OFF;\n") + # schema reread + blank() + comment("We need to force SQLite to reread the schema because otherwise it doesn't know that " + "the virtual tables we inserted directly into sqlite_master exist. See " + "last comments of https://sqlite.org/cvstrac/tktview?tn=3425") + self.write(self.stdout, "BEGIN;\nCREATE TABLE no_such_table(x,y,z);\nROLLBACK;\n") + + finally: + self.process_sql("END", internal=True) + + def command_echo(self, cmd): + """echo ON|OFF: If ON then each SQL statement or command is printed before execution (default OFF) + + The SQL statement or command is sent to error output so that + it is not intermingled with regular output. + """ + self.echo = self._boolean_command("echo", cmd) + + def set_encoding(self, enc): + """Saves *enc* as the default encoding, after verifying that + it is valid. You can also include :error to specify error + handling - eg 'cp437:replace' + + Raises an exception on invalid encoding or error + """ + enc = enc.split(":", 1) + if len(enc) > 1: + enc, errors = enc + else: + enc = enc[0] + errors = None + try: + codecs.lookup(enc) + except LookupError: + raise self.Error("No known encoding '%s'" % (enc, )) + try: + if errors is not None: + codecs.lookup_error(errors) + except LookupError: + raise self.Error("No known codec error handler '%s'" % (errors, )) + self.encoding = enc, errors + + def command_encoding(self, cmd): + """encoding ENCODING: Set the encoding used for new files opened via .output and imports + + SQLite and APSW work internally using Unicode and characters. + Files however are a sequence of bytes. An encoding describes + how to convert between bytes and characters. The default + encoding is utf8 and that is generally the best value to use + when other programs give you a choice. + + You can also specify an error handler. For example + 'cp437:replace' will use code page 437 and any Unicode + codepoints not present in cp437 will be replaced (typically + with something like a question mark). Other error handlers + include 'ignore', 'strict' (default) and 'xmlcharrefreplace'. + + For the default input/output/error streams on startup the + shell defers to Python's detection of encoding. For example + on Windows it asks what code page is in use and on Unix it + looks at the LC_CTYPE environment variable. You can set the + PYTHONIOENCODING environment variable to override this + detection. + + This command affects files opened after setting the encoding + as well as imports. + + See the online APSW documentation for more details. 
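+ + For example (code page 437 with the replace error handler):: + + .encoding cp437:replace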
+ """ + if len(cmd) != 1: + raise self.Error("Encoding takes one argument") + self.set_encoding(cmd[0]) + + def command_exceptions(self, cmd): + """exceptions ON|OFF: If ON then detailed tracebacks are shown on exceptions (default OFF) + + Normally when an exception occurs the error string only is + displayed. However it is sometimes useful to get a full + traceback. An example would be when you are developing + virtual tables and using the shell to exercise them. In + addition to displaying each stack frame, the local variables + within each frame are also displayed. + """ + self.exceptions = self._boolean_command("exceptions", cmd) + + def command_exit(self, cmd): + """exit:Exit this program""" + if len(cmd): + raise self.Error("Exit doesn't take any parameters") + sys.exit(0) + + def command_quit(self, cmd): + """quit:Exit this program""" + if len(cmd): + raise self.Error("Quit doesn't take any parameters") + sys.exit(0) + + def command_explain(self, cmd): + """explain ON|OFF: Set output mode suitable for explain (default OFF) + + Explain shows the underlying SQLite virtual machine code for a + statement. You need to prefix the SQL with explain. For example: + + explain select * from table; + + This output mode formats the explain output nicely. If you do + '.explain OFF' then the output mode and settings in place when + you did '.explain ON' are restored. + """ + if len(cmd) == 0 or self._boolean_command("explain", cmd): + self.push_output() + self.header = True + self.widths = [4, 13, 4, 4, 4, 13, 2, 13] + self.truncate = False + self.output = self.output_column + else: + self.pop_output() + + def command_find(self, cmd): + """find what ?TABLE?: Searches all columns of all tables for a value + + The find command helps you locate data across your database + for example to find a string or any references to an id. + + You can specify a like pattern to limit the search to a subset + of tables (eg specifying 'CUSTOMER%' for all tables beginning + with CUSTOMER). + + The what value will be treated as a string and/or integer if + possible. If what contains % or _ then it is also treated as + a like pattern. + + This command will take a long time to execute needing to read + all of the relevant tables. + """ + if len(cmd) < 1 or len(cmd) > 2: + raise self.Error("At least one argument required and at most two accepted") + tablefilter = "%" + if len(cmd) == 2: + tablefilter = cmd[1] + querytemplate = [] + queryparams = [] + + def qp(): # binding for current queryparams + return "?" 
+ str(len(queryparams)) + + s = cmd[0] + if '%' in s or '_' in s: + queryparams.append(s) + querytemplate.append("%s LIKE " + qp()) + queryparams.append(s) + querytemplate.append("%s = " + qp()) + try: + i = int(s) + queryparams.append(i) + querytemplate.append("%s = " + qp()) + except ValueError: + pass + querytemplate = " OR ".join(querytemplate) + for (table, ) in self.db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name LIKE ?1", + (tablefilter, )): + t = self._fmt_sql_identifier(table) + query = "SELECT * from %s WHERE " % (t, ) + colq = [] + for _, column, _, _, _, _ in self.db.execute("pragma table_info(%s)" % (t, )): + colq.append(querytemplate % ((self._fmt_sql_identifier(column), ) * len(queryparams))) + query = query + " OR ".join(colq) + self.process_sql(query, queryparams, internal=True, summary=("Table " + table + "\n", "\n")) + + def command_header(self, cmd): + """header(s) ON|OFF: Display the column names in output (default OFF) + + """ + self.header = self._boolean_command("header", cmd) + + command_headers = command_header + + _help_info = None + + def command_help(self, cmd): + """help ?COMMAND?: Shows list of commands and their usage. If COMMAND is specified then shows detail about that COMMAND. ('.help all' will show detailed help about all commands.) + """ + if not self._help_info: + # buildup help database + self._help_info = {} + for c in dir(self): + if not c.startswith("command_"): + continue + # help is 3 parts + # - the syntax string (eg backup ?dbname? filename) + # - the one liner description (eg saves database to filename) + # - the multi-liner detailed description + # We grab this from the doc string for the function in the form + # syntax: one liner\nmulti\nliner + d = getattr(self, c).__doc__ + assert d, c + " command must have documentation" + c = c[len("command_"):] + if c in ("headers", "color"): continue + while d[0] == "\n": + d = d[1:] + parts = d.split("\n", 1) + firstline = parts[0].strip().split(":", 1) + assert len(firstline) == 2, c + " command must have usage: description doc" + if len(parts) == 1 or len(parts[1].strip()) == 0: # work around textwrap bug + multi = "" + else: + multi = textwrap.dedent(parts[1]) + if c == "mode": + if not self._output_modes: + self._cache_output_modes() + firstline[1] = firstline[1] + " " + " ".join(self._output_modes) + multi = multi + "\n\n" + "\n\n".join(self._output_modes_detail) + if c == "colour": + colours = list(self._colours.keys()) + colours.sort() + firstline[1] = firstline[1] + " from " + ", ".join(colours) + if len(multi.strip()) == 0: # All whitespace + multi = None + else: + multi = multi.strip("\n") + # we need to keep \n\n as a newline but turn all others into spaces + multi = multi.replace("\n\n", "\x00") + multi = multi.replace("\n", " ") + multi = multi.replace("\x00", "\n\n") + multi = multi.split("\n\n") + self._help_info[c] = ('.' 
+ firstline[0].strip(), firstline[1].strip(), multi) + + self.write(self.stderr, "\n") + + tw = self._terminal_width() + if tw < 32: + tw = 32 + if len(cmd) == 0: + commands = list(self._help_info.keys()) + commands.sort() + w = 0 + for command in commands: + if len(self._help_info[command][0]) > w: + w = len(self._help_info[command][0]) + out = [] + for command in commands: + hi = self._help_info[command] + # usage string + out.append(hi[0]) + # space padding (including 2 for between columns) + out.append(" " * (2 + w - len(hi[0]))) + # usage message wrapped if need be + out.append(("\n" + " " * (2 + w)).join(textwrap.wrap(hi[1], tw - w - 2))) + # newline + out.append("\n") + self.write(self.stderr, "".join(out)) + else: + if cmd[0] == "all": + cmd = list(self._help_info.keys()) + cmd.sort() + w = 0 + for command in self._help_info: + if len(self._help_info[command][0]) > w: + w = len(self._help_info[command][0]) + + for command in cmd: + if command == "headers": command = "header" + if command not in self._help_info: + raise self.Error("No such command \"%s\"" % (command, )) + out = [] + hi = self._help_info[command] + # usage string + out.append(hi[0]) + # space padding (2) + out.append(" " * (2 + w - len(hi[0]))) + # usage message wrapped if need be + out.append(("\n" + " " * (2 + w)).join(textwrap.wrap(hi[1], tw - w - 2)) + "\n") + if hi[2]: + # newlines + out.append("\n") + # detailed message + for i, para in enumerate(hi[2]): + out.append(textwrap.fill(para, tw) + "\n") + if i < len(hi[2]) - 1: + out.append("\n") + # if not first one then print separator header + if command != cmd[0]: + self.write(self.stderr, "\n" + "=" * tw + "\n") + self.write(self.stderr, "".join(out)) + self.write(self.stderr, "\n") + + def command_import(self, cmd): + """import FILE TABLE: Imports separated data from FILE into TABLE + + Reads data from the file into the named table using the + current separator and encoding. For example if the separator + is currently a comma then the file should be CSV (comma + separated values). + + All values read in are supplied to SQLite as strings. If you + want SQLite to treat them as other types then declare your + columns appropriately. For example declaring a column 'REAL' + will result in the values being stored as floating point if + they can be safely converted. See this page for more details: + + https://sqlite.org/datatype3.html + + Another alternative is to create a temporary table, insert the + values into that and then use casting. + + CREATE TEMPORARY TABLE import(a,b,c); + + .import filename import + + CREATE TABLE final AS SELECT cast(a as BLOB), cast(b as INTEGER), cast(c as CHAR) from import; + + DROP TABLE import; + + You can also get more sophisticated using the SQL CASE + operator. For example this will turn zero length strings into + null: + + SELECT CASE col WHEN '' THEN null ELSE col END FROM ... + """ + if len(cmd) != 2: + raise self.Error("import takes two parameters") + + try: + final = None + # start transaction so database can't be changed + # underneath us + self.db.execute("BEGIN IMMEDIATE") + final = "ROLLBACK" + + # how many columns? + ncols = len(self.db.execute("pragma table_info(" + self._fmt_sql_identifier(cmd[1]) + + ")").fetchall()) + if ncols < 1: + raise self.Error("No such table '%s'" % (cmd[1], )) + + cur = self.db.cursor() + sql = "insert into %s values(%s)" % (self._fmt_sql_identifier(cmd[1]), ",".join("?" 
* ncols)) + + kwargs = {} + if self.separator == ",": + kwargs["dialect"] = "excel" + elif self.separator == "\t": + kwargs["dialect"] = "excel-tab" + else: + kwargs["quoting"] = csv.QUOTE_NONE + kwargs["delimiter"] = self.separator + kwargs["doublequote"] = False + kwargs["quotechar"] = "\x00" + row = 1 + for line in self._csvin_wrapper(cmd[0], kwargs): + if len(line) != ncols: + raise self.Error("row %d has %d columns but should have %d" % (row, len(line), ncols)) + try: + cur.execute(sql, line) + except: + self.write(self.stderr, "Error inserting row %d" % (row, )) + raise + row += 1 + self.db.execute("COMMIT") + + except: + if final: + self.db.execute(final) + raise + + def _csvin_wrapper(self, filename, dialect): + # Returns a csv reader that works around python bugs and uses + # dialect dict to configure reader + thefile = codecs.open(filename, "r", self.encoding[0]) + for line in csv.reader(thefile, **dialect.copy()): + yield line + thefile.close() + return + + def command_autoimport(self, cmd): + """autoimport FILENAME ?TABLE?: Imports filename creating a table and automatically working out separators and data types (alternative to .import command) + + The import command requires that you precisely pre-setup the + table and schema, and set the data separators (eg commas or + tabs). In many cases this information can be automatically + deduced from the file contents which is what this command + does. There must be at least two columns and two rows. + + If the table is not specified then the basename of the file + will be used. + + Additionally the type of the contents of each column is also + deduced - for example if it is a number or date. Empty values + are turned into nulls. Dates are normalized into YYYY-MM-DD + format and DateTime are normalized into ISO8601 format to + allow easy sorting and searching. 4 digit years must be used + to detect dates. US (swapped day and month) versus rest of + the world is also detected providing there is at least one + value that resolves the ambiguity. + + Care is taken to ensure that columns looking like numbers are + only treated as numbers if they do not have unnecessary + leading zeroes or plus signs. This is to avoid treating phone + numbers and similar number like strings as integers. + + This command can take quite some time on large files as they + are effectively imported twice. The first time is to + determine the format and the types for each column while the + second pass actually imports the data. 
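+ + For example (hypothetical file name):: + + .autoimport sales.csv sales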
+ """ + if len(cmd) < 1 or len(cmd) > 2: + raise self.Error("Expected one or two parameters") + if not os.path.exists(cmd[0]): + raise self.Error("File \"%s\" does not exist" % (cmd[0], )) + if len(cmd) == 2: + tablename = cmd[1] + else: + tablename = None + try: + final = None + c = self.db.cursor() + c.execute("BEGIN IMMEDIATE") + final = "ROLLBACK" + + if not tablename: + tablename = os.path.splitext(os.path.basename(cmd[0]))[0] + + if c.execute("pragma table_info(%s)" % (self._fmt_sql_identifier(tablename), )).fetchall(): + raise self.Error("Table \"%s\" already exists" % (tablename, )) + + # The types we support deducing + def DateUS(v): # US formatted date with wrong ordering of day and month + return DateWorld(v, switchdm=True) + + def DateWorld(v, switchdm=False): # Sensibly formatted date as used anywhere else in the world + y, m, d = self._getdate(v) + if switchdm: m, d = d, m + if m < 1 or m > 12 or d < 1 or d > 31: + raise ValueError + return "%d-%02d-%02d" % (y, m, d) + + def DateTimeUS(v): # US date and time + return DateTimeWorld(v, switchdm=True) + + def DateTimeWorld(v, switchdm=False): # Sensible date and time + y, m, d, h, M, s = self._getdatetime(v) + if switchdm: m, d = d, m + if m < 1 or m > 12 or d < 1 or d > 31 or h < 0 or h > 23 or M < 0 or M > 59 or s < 0 or s > 65: + raise ValueError + return "%d-%02d-%02dT%02d:%02d:%02d" % (y, m, d, h, M, s) + + def Number(v): # we really don't want phone numbers etc to match + # Python's float & int constructors allow whitespace which we don't + if re.search(r"\s", v): + raise ValueError + if v == "0": return 0 + if v[0] == "+": # idd prefix + raise ValueError + if re.match("^[0-9]+$", v): + if v[0] == "0": raise ValueError # also a phone number + return int(v) + if v[0] == "0" and not v.startswith("0."): # deceptive not a number + raise ValueError + return float(v) + + # Work out the file format + formats = [{"dialect": "excel"}, {"dialect": "excel-tab"}] + seps = ["|", ";", ":"] + if self.separator not in seps: + seps.append(self.separator) + for sep in seps: + formats.append({"quoting": csv.QUOTE_NONE, "delimiter": sep, "doublequote": False, "quotechar": "\x00"}) + possibles = [] + errors = [] + encodingissue = False + # format is copy() on every use. This appears bizarre and + # unnecessary. However Python 2.3 and 2.4 somehow manage + # to empty it if not copied. 
+ for format in formats: + ncols = -1 + lines = 0 + try: + for line in self._csvin_wrapper(cmd[0], format.copy()): + if lines == 0: + lines = 1 + ncols = len(line) + # data type guess setup + datas = [] + for i in range(ncols): + datas.append([DateUS, DateWorld, DateTimeUS, DateTimeWorld, Number]) + allblanks = [True] * ncols + continue + if len(line) != ncols: + raise ValueError("Expected %d columns - got %d" % (ncols, len(line))) + lines += 1 + for i in range(ncols): + if not line[i]: + continue + allblanks[i] = False + if not datas[i]: + continue + # remove datas that give ValueError + d = [] + for dd in datas[i]: + try: + dd(line[i]) + d.append(dd) + except ValueError: + pass + datas[i] = d + if ncols > 1 and lines > 1: + # if a particular column was allblank then clear datas for it + for i in range(ncols): + if allblanks[i]: + datas[i] = [] + possibles.append((format.copy(), ncols, lines, datas)) + except UnicodeDecodeError: + encodingissue = True + except: + s = str(sys.exc_info()[1]) + if s not in errors: + errors.append(s) + + if len(possibles) == 0: + if encodingissue: + raise self.Error( + "The file is probably not in the current encoding \"%s\" and didn't match a known file format" % + (self.encoding[0], )) + v = "File doesn't appear to match a known type." + if len(errors): + v += " Errors reported:\n" + "\n".join([" " + e for e in errors]) + raise self.Error(v) + if len(possibles) > 1: + raise self.Error("File matches more than one type!") + format, ncols, lines, datas = possibles[0] + fmt = format.get("dialect", None) + if fmt is None: + fmt = "(delimited by \"%s\")" % (format["delimiter"], ) + self.write(self.stdout, "Detected Format %s Columns %d Rows %d\n" % (fmt, ncols, lines)) + # Header row + reader = self._csvin_wrapper(cmd[0], format) + for header in reader: + break + # Check schema + identity = lambda x: x + for i in range(ncols): + if len(datas[i]) > 1: + raise self.Error("Column #%d \"%s\" has ambiguous data format - %s" % + (i + 1, header[i], ", ".join([d.__name__ for d in datas[i]]))) + if datas[i]: + datas[i] = datas[i][0] + else: + datas[i] = identity + # Make the table + sql = "CREATE TABLE %s(%s)" % (self._fmt_sql_identifier(tablename), ", ".join( + [self._fmt_sql_identifier(h) for h in header])) + c.execute(sql) + # prep work for each row + sql = "INSERT INTO %s VALUES(%s)" % (self._fmt_sql_identifier(tablename), ",".join(["?"] * ncols)) + for line in reader: + vals = [] + for i in range(ncols): + l = line[i] + if not l: + vals.append(None) + else: + vals.append(datas[i](l)) + c.execute(sql, vals) + + c.execute("COMMIT") + self.write(self.stdout, "Auto-import into table \"%s\" complete\n" % (tablename, )) + except: + if final: + self.db.execute(final) + raise + + def _getdate(self, v): + # Returns a tuple of 3 items y,m,d from string v + m = re.match(r"^([0-9]+)[^0-9]([0-9]+)[^0-9]([0-9]+)$", v) + if not m: + raise ValueError + y, m, d = int(m.group(1)), int(m.group(2)), int(m.group(3)) + if d > 1000: # swap order + y, m, d = d, m, y + if y < 1000 or y > 9999: + raise ValueError + return y, m, d + + def _getdatetime(self, v): + # must be at least HH:MM + m = re.match(r"^([0-9]+)[^0-9]([0-9]+)[^0-9]([0-9]+)[^0-9]+([0-9]+)[^0-9]([0-9]+)([^0-9]([0-9]+))?$", v) + if not m: + raise ValueError + items = list(m.group(1, 2, 3, 4, 5, 7)) + for i in range(len(items)): + if items[i] is None: + items[i] = 0 + items = [int(i) for i in items] + if items[2] > 1000: + items = [items[2], items[1], items[0]] + items[3:] + if items[0] < 1000 or items[0] > 9999: + raise 
ValueError + return items + + def command_indices(self, cmd): + """indices TABLE: Lists all indices on table TABLE + + """ + if len(cmd) != 1: + raise self.Error("indices takes one table name") + self.push_output() + self.header = False + self.output = self.output_list + try: + self.process_sql( + "SELECT name FROM sqlite_master WHERE type='index' AND tbl_name LIKE ?1 " + "UNION ALL SELECT name FROM sqlite_temp_master WHERE type='index' AND tbl_name LIKE " + "?1 ORDER by name", + cmd, + internal=True) + finally: + self.pop_output() + + def command_load(self, cmd): + """load FILE ?ENTRY?: Loads a SQLite extension library + + Note: Extension loading may not be enabled in the SQLite + library version you are using. + + Extensions are an easy way to add new functions and + functionality. For a useful extension look at the bottom of + https://sqlite.org/contrib + + By default sqlite3_extension_init is called in the library but + you can specify an alternate entry point. + + If you get an error about the extension not being found you + may need to explicitly specify the directory. For example if + it is in the current directory then use: + + .load ./extension.so + """ + if len(cmd) < 1 or len(cmd) > 2: + raise self.Error("load takes one or two parameters") + try: + self.db.enableloadextension(True) + except: + raise self.Error("Extension loading is not supported") + + self.db.loadextension(*cmd) + + _output_modes = None + + def command_mode(self, cmd): + """mode MODE ?TABLE?: Sets output mode to one of""" + if len(cmd) in (1, 2): + w = cmd[0] + if w == "tabs": + w = "list" + m = getattr(self, "output_" + w, None) + if w != "insert": + if len(cmd) == 2: + raise self.Error("Output mode %s doesn't take parameters" % (cmd[0])) + if m: + self.output = m + # set some defaults + self.truncate = True + if cmd[0] == "csv": + self.separator = "," + elif cmd[0] == "tabs": + self.separator = "\t" + else: + pass + #self.separator=self._output_stack[0]["separator"] + if w == "insert": + if len(cmd) == 2: + self._output_table = cmd[1] + else: + self._output_table = "table" + self._output_table = self._fmt_sql_identifier(self._output_table) + return + if not self._output_modes: + self._cache_output_modes() + raise self.Error("Expected a valid output mode: " + ", ".join(self._output_modes)) + + # needed so command completion and help can use it + def _cache_output_modes(self): + modes = [m[len("output_"):] for m in dir(self) if m.startswith("output_")] + modes.append("tabs") + modes.sort() + self._output_modes = modes + + detail = [] + + for m in modes: + if m == 'tabs': continue + d = getattr(self, "output_" + m).__doc__ + assert d, "output mode " + m + " needs doc" + d = d.replace("\n", " ").strip() + while " " in d: + d = d.replace(" ", " ") + detail.append(m + ": " + d) + self._output_modes_detail = detail + + def command_nullvalue(self, cmd): + """nullvalue STRING: Print STRING in place of null values + + This affects textual output modes like column and list and + sets how SQL null values are shown. The default is a zero + length string. Insert mode and dumps are not affected by this + setting. You can use double quotes to supply a zero length + string. For example: + + .nullvalue "" # the default + .nullvalue # rather obvious + .nullvalue " \\t " # A tab surrounded by spaces + """ + if len(cmd) != 1: + raise self.Error("nullvalue takes exactly one parameter") + self.nullvalue = self.fixup_backslashes(cmd[0]) + + def command_open(self, cmd): + """open ?OPTIONS? 
?FILE?: Closes existing database and opens a different one
+
+ Options are: --new which deletes the file if it already exists
+
+ If FILE is omitted then a memory database is opened
+ """
+ new = False
+ dbname = None
+ c = cmd
+ while c:
+ p = c.pop(0)
+ if p.startswith("--"):
+ if p == "--new":
+ new = True
+ continue
+ raise self.Error("Unknown open param: " + p)
+ if dbname:
+ raise self.Error("Too many arguments: " + p)
+ dbname = p
+
+ if new and dbname:
+ # close it first in case re-opening existing. windows doesn't
+ # allow deleting open files, tag alongs cause problems etc
+ # hence retry and sleeps
+ self.db = (None, None)
+ for suffix in "", "-journal", "-wal", "-shm":
+ fn = dbname + suffix
+ for retry in range(1, 5):
+ try:
+ os.remove(fn)
+ break
+ except OSError:
+ if not os.path.isfile(fn):
+ break
+ # under windows tag alongs can delay being able to
+ # delete after we have closed the file
+ import gc
+ gc.collect(2)
+ time.sleep(.05 * retry)
+ else:
+ os.rename(fn, fn + "-DELETEME")
+
+ self.db = (None, dbname)
+
+ def command_output(self, cmd):
+ """output FILENAME: Send output to FILENAME (or stdout)
+
+ If the FILENAME is stdout then output is sent to standard
+ output from when the shell was started. The file is opened
+ using the current encoding (change with .encoding command).
+ """
+ # Flush everything
+ self.stdout.flush()
+ self.stderr.flush()
+ if hasattr(self.stdin, "flush"):
+ try:
+ self.stdin.flush()
+ except IOError: # see issue 117
+ pass
+
+ # we will also close stdout but only do so once we have a
+ # replacement so that stdout is always valid
+
+ if len(cmd) != 1:
+ raise self.Error("You must specify a filename")
+
+ try:
+ fname = cmd[0]
+ if fname == "stdout":
+ old = None
+ if self.stdout != self._original_stdout:
+ old = self.stdout
+ self.stdout = self._original_stdout
+ if old is not None: # done here in case close raises exception
+ old.close()
+ return
+
+ newf = codecs.open(fname, "w", self.encoding[0], self.encoding[1])
+ old = None
+ if self.stdout != self._original_stdout:
+ old = self.stdout
+ self.stdout = newf
+ if old is not None:
+ old.close()
+ finally:
+ self._out_colour()
+
+ def command_print(self, cmd):
+ """print STRING: print the literal STRING
+
+ If more than one argument is supplied then they are printed
+ space separated. You can use backslash escapes such as \\n
+ and \\t.
+ """
+ self.write(self.stdout, " ".join([self.fixup_backslashes(i) for i in cmd]) + "\n")
+
+ def command_prompt(self, cmd):
+ """prompt MAIN ?CONTINUE?: Changes the prompts for first line and continuation lines
+
+ The default is to print 'sqlite> ' for the main prompt where
+ you can enter a dot command or a SQL statement. If the SQL
+ statement is incomplete (eg not ; terminated) then you are
+ prompted for more using the continuation prompt which defaults
+ to ' ..> '. Example:
+
+ .prompt "Yes, Master> " "More, Master> "
+
+ You can use backslash escapes such as \\n and \\t.
+ """
+ if len(cmd) < 1 or len(cmd) > 2:
+ raise self.Error("prompt takes one or two arguments")
+ self.prompt = self.fixup_backslashes(cmd[0])
+ if len(cmd) == 2:
+ self.moreprompt = self.fixup_backslashes(cmd[1])
+
+ def command_read(self, cmd):
+ """read FILENAME: Processes SQL and commands in FILENAME (or Python if FILENAME ends with .py)
+
+ Treats the specified file as input (a mixture of SQL and/or
+ dot commands). If the filename ends in .py then it is treated
+ as Python code instead.
+
+ For Python code the symbol 'shell' refers to the instance of
+ the shell and 'apsw' is the apsw module.
+ """
+ if len(cmd) != 1:
+ raise self.Error("read takes a single filename")
+ if cmd[0].lower().endswith(".py"):
+ g = {}
+ g.update({'apsw': apsw, 'shell': self})
+ # compile step is needed to associate name with code
+ f = open(cmd[0], "rb")
+ try:
+ exec(compile(f.read(), cmd[0], 'exec'), g, g)
+ finally:
+ f.close()
+ else:
+ f = codecs.open(cmd[0], "r", self.encoding[0])
+ try:
+ try:
+ self.push_input()
+ self.stdin = f
+ self.interactive = False
+ self.input_line_number = 0
+ while True:
+ line = self.getcompleteline()
+ if line is None:
+ break
+ self.process_complete_line(line)
+ except:
+ eval = sys.exc_info()[1]
+ if not isinstance(eval, SystemExit):
+ self._append_input_description()
+ raise
+
+ finally:
+ self.pop_input()
+ f.close()
+
+ def command_restore(self, cmd):
+ """restore ?DB? FILE: Restore database from FILE into DB (default "main")
+
+ Copies the contents of FILE to the current database (default "main").
+ The backup is done at the page level - SQLite copies the pages as
+ is. There is no round trip through SQL code.
+ """
+ dbname = "main"
+ if len(cmd) == 1:
+ fname = cmd[0]
+ elif len(cmd) == 2:
+ dbname = cmd[0]
+ fname = cmd[1]
+ else:
+ raise self.Error("Restore takes one or two parameters")
+ input = apsw.Connection(fname)
+ b = self.db.backup(dbname, input, "main")
+ try:
+ while not b.done:
+ b.step()
+ finally:
+ b.finish()
+ input.close()
+
+ def command_schema(self, cmd):
+ """schema ?TABLE? [TABLE...]: Shows SQL for table
+
+ If you give one or more tables then their schema is listed
+ (including indices). If you don't specify any then all
+ schemas are listed. TABLE is a like pattern so you can use %
+ for wildcards.
+ """
+ self.push_output()
+ self.output = self.output_list
+ self.header = False
+ try:
+ if len(cmd) == 0:
+ cmd = ['%']
+ for n in cmd:
+ self.process_sql(
+ "SELECT sql||';' FROM "
+ "(SELECT sql sql, type type, tbl_name tbl_name, name name "
+ "FROM sqlite_master UNION ALL "
+ "SELECT sql, type, tbl_name, name FROM sqlite_temp_master) "
+ "WHERE tbl_name LIKE ?1 AND type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%' "
+ "ORDER BY substr(type,2,1), name", (n, ),
+ internal=True)
+ finally:
+ self.pop_output()
+
+ def command_separator(self, cmd):
+ """separator STRING: Change separator for output mode and .import
+
+ You can use quotes and backslashes. For example to set the
+ separator to space tab space you can use:
+
+ .separator " \\t "
+
+ The setting is automatically changed when you switch to csv or
+ tabs output mode. You should also set it before doing an
+ import (ie "," for CSV and "\\t" for TSV).
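+
+ For example, to import a tab separated file you could use
+ (the file name is illustrative):
+
+ .separator "\\t"
+ .import data.tsv mytable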
+ """ + if len(cmd) != 1: + raise self.Error("separator takes exactly one parameter") + self.separator = self.fixup_backslashes(cmd[0]) + + _shows = ("echo", "explain", "headers", "mode", "nullvalue", "output", "separator", "width", "exceptions", + "encoding") + + def command_show(self, cmd): + """show: Show the current values for various settings.""" + if len(cmd) > 1: + raise self.Error("show takes at most one parameter") + if len(cmd): + what = cmd[0] + if what not in self._shows: + raise self.Error("Unknown show: '%s'" % (what, )) + else: + what = None + + outs = [] + for i in self._shows: + k = i + if what and i != what: + continue + # boolean settings + if i in ("echo", "headers", "exceptions"): + if i == "headers": i = "header" + v = "off" + if getattr(self, i): + v = "on" + elif i == "explain": + # we cheat by looking at truncate setting! + v = "on" + if self.truncate: + v = "off" + elif i in ("nullvalue", "separator"): + v = self._fmt_c_string(getattr(self, i)) + elif i == "mode": + if not self._output_modes: + self._cache_output_modes() + for v in self._output_modes: + if self.output == getattr(self, "output_" + v): + break + else: + assert False, "Bug: didn't find output mode" + elif i == "output": + if self.stdout is self._original_stdout: + v = "stdout" + else: + v = getattr(self.stdout, "name", "") + elif i == "width": + v = " ".join(["%d" % (i, ) for i in self.widths]) + elif i == "encoding": + v = self.encoding[0] + if self.encoding[1]: + v += " (Errors " + self.encoding[1] + ")" + else: + assert False, "Bug: unknown show handling" + outs.append((k, v)) + + # find width of k column + l = 0 + for k, v in outs: + if len(k) > l: + l = len(k) + + for k, v in outs: + self.write(self.stderr, "%*.*s: %s\n" % (l, l, k, v)) + + def command_tables(self, cmd): + """tables ?PATTERN?: Lists names of tables matching LIKE pattern + + This also returns views. + """ + self.push_output() + self.output = self.output_list + self.header = False + try: + if len(cmd) == 0: + cmd = ['%'] + + # The SQLite shell code filters out sqlite_ prefixes if + # you specified an argument else leaves them in. It also + # has a hand coded output mode that does space separation + # plus wrapping at 80 columns. + for n in cmd: + self.process_sql( + "SELECT name FROM sqlite_master " + "WHERE type IN ('table', 'view') AND name NOT LIKE 'sqlite_%' " + "AND name like ?1 " + "UNION ALL " + "SELECT name FROM sqlite_temp_master " + "WHERE type IN ('table', 'view') AND name NOT LIKE 'sqlite_%' " + "ORDER BY 1", (n, ), + internal=True) + finally: + self.pop_output() + + def command_timeout(self, cmd): + """timeout MS: Try opening locked tables for MS milliseconds + + If a database is locked by another process SQLite will keep + retrying. This sets how many thousandths of a second it will + keep trying for. If you supply zero or a negative number then + all busy handlers are disabled. + """ + if len(cmd) != 1: + raise self.Error("timeout takes a number") + try: + t = int(cmd[0]) + except: + raise self.Error("%s is not a number" % (cmd[0], )) + self.db.setbusytimeout(t) + + def command_timer(self, cmd): + """timer ON|OFF: Control printing of time and resource usage after each query + + The values displayed are in seconds when shown as floating + point or an absolute count. Only items that have changed + since starting the query are shown. On non-Windows platforms + considerably more information can be shown. 
+ """ + if self._boolean_command("timer", cmd): + try: + self.get_resource_usage() + except: + raise self.Error("Timing not supported by this Python version/platform") + self.timer = True + else: + self.timer = False + + def command_width(self, cmd): + """width NUM NUM ...: Set the column widths for "column" mode + + In "column" output mode, each column is a fixed width with values truncated to + fit. Specify new widths using this command. Use a negative number + to right justify and zero for default column width. + """ + if len(cmd) == 0: + raise self.Error("You need to specify some widths!") + w = [] + for i in cmd: + try: + w.append(int(i)) + except: + raise self.Error("'%s' is not a valid number" % (i, )) + self.widths = w + + def _terminal_width(self): + """Works out the terminal width which is used for word wrapping + some output (eg .help)""" + try: + if sys.platform == "win32": + import ctypes, struct + h = ctypes.windll.kernel32.GetStdHandle(-12) # -12 is stderr + buf = ctypes.create_string_buffer(22) + if ctypes.windll.kernel32.GetConsoleScreenBufferInfo(h, buf): + _, _, _, _, _, left, top, right, bottom, _, _ = struct.unpack("hhhhHhhhhhh", buf.raw) + return right - left + raise Exception() + else: + # posix + import struct, fcntl, termios + s = struct.pack('HHHH', 0, 0, 0, 0) + x = fcntl.ioctl(2, termios.TIOCGWINSZ, s) + return struct.unpack('HHHH', x)[1] + except: + try: + v = int(os.getenv("COLUMNS")) + if v < 10: + return 80 + return v + except: + return 80 + + def push_output(self): + """Saves the current output settings onto a stack. See + :meth:`pop_output` for more details as to why you would use + this.""" + o = {} + for k in "separator", "header", "nullvalue", "output", "widths", "truncate": + o[k] = getattr(self, k) + self._output_stack.append(o) + + def pop_output(self): + """Restores most recently pushed output. There are many + output parameters such as nullvalue, mode + (list/tcl/html/insert etc), column widths, header etc. If you + temporarily need to change some settings then + :meth:`push_output`, change the settings and then pop the old + ones back. + + A simple example is implementing a command like .dump. Push + the current output, change the mode to insert so we get SQL + inserts printed and then pop to go back to what was there + before. + + """ + # first item should always be present + assert len(self._output_stack) + if len(self._output_stack) == 1: + o = self._output_stack[0] + else: + o = self._output_stack.pop() + for k, v in o.items(): + setattr(self, k, v) + + def _append_input_description(self): + """When displaying an error in :meth:`handle_exception` we + want to give context such as when the commands being executed + came from a .read command (which in turn could execute another + .read). + """ + if self.interactive: + return + res = [] + res.append("Line %d" % (self.input_line_number, )) + res.append(": " + getattr(self.stdin, "name", "")) + self._input_descriptions.append(" ".join(res)) + + def fixup_backslashes(self, s): + """Implements the various backlash sequences in s such as + turning backslash t into a tab. + + This function is needed because shlex does not do it for us. 
+ """ + if "\\" not in s: return s + # See the resolve_backslashes function in SQLite shell source + res = [] + i = 0 + while i < len(s): + if s[i] != "\\": + res.append(s[i]) + i += 1 + continue + i += 1 + if i >= len(s): + raise self.Error("Backslash with nothing following") + c = s[i] + res.append({"\\": "\\", "r": "\r", "n": "\n", "t": "\t"}.get(c, None)) + i += 1 # advance again + if res[-1] is None: + raise self.Error("Unknown backslash sequence \\" + c) + return "".join(res) + + def write(self, dest, text): + "Writes text to dest. dest will typically be one of self.stdout or self.stderr." + dest.write(text) + + _raw_input = input + + def getline(self, prompt=""): + """Returns a single line of input (may be incomplete SQL) from self.stdin. + + If EOF is reached then return None. Do not include trailing + newline in return. + """ + self.stdout.flush() + self.stderr.flush() + try: + if self.interactive: + if self.stdin is sys.stdin: + c = self.colour.prompt, self.colour.prompt_ + if self._using_readline: + # these are needed so that readline knows they are non-printing characters + c = "\x01" + c[0] + "\x02", "\x01" + c[1] + "\x02", + line = self._raw_input(c[0] + prompt + c[1]) + "\n" # raw_input excludes newline + else: + self.write(self.stdout, prompt) + line = self.stdin.readline() # includes newline unless last line of file doesn't have one + else: + line = self.stdin.readline() # includes newline unless last line of file doesn't have one + self.input_line_number += 1 + if sys.version_info < (3, 0): + if type(line) != unicode: + enc = getattr(self.stdin, "encoding", self.encoding[0]) + if not enc: enc = self.encoding[0] + line = line.decode(enc) + except EOFError: + return None + if len(line) == 0: # always a \n on the end normally so this is EOF + return None + if line[-1] == "\n": + line = line[:-1] + return line + + def getcompleteline(self): + """Returns a complete input. + + For dot commands it will be one line. For SQL statements it + will be as many as is necessary to have a + :meth:`~apsw.complete` statement (ie semicolon terminated). + Returns None on end of file.""" + try: + self._completion_first = True + command = self.getline(self.prompt) + if command is None: + return None + if len(command.strip()) == 0: + return "" + if command[0] == "?": command = ".help " + command[1:] + # incomplete SQL? + while command[0] != "." and not apsw.complete(command): + self._completion_first = False + line = self.getline(self.moreprompt) + if line is None: # unexpected eof + raise self.Error("Incomplete SQL (line %d of %s): %s\n" % + (self.input_line_number, getattr(self.stdin, "name", ""), command)) + if line in ("go", "/"): + break + command = command + "\n" + line + return command + except KeyboardInterrupt: + self.handle_interrupt() + return "" + + def handle_interrupt(self): + """Deal with keyboard interrupt (typically Control-C). It + will :meth:`~apsw.Connection.interrupt` the database and print"^C" if interactive.""" + self.db.interrupt() + if not self.bail and self.interactive: + self.write(self.stderr, "^C\n") + return + raise + + def process_complete_line(self, command): + """Given some text will call the appropriate method to process + it (eg :meth:`process_sql` or :meth:`process_command`)""" + try: + if len(command.strip()) == 0: + return + if command[0] == ".": + self.process_command(command) + else: + self.process_sql(command) + except KeyboardInterrupt: + self.handle_interrupt() + + def push_input(self): + """Saves the current input parameters to a stack. 
See :meth:`pop_input`.""" + d = {} + for i in "interactive", "stdin", "input_line_number": + d[i] = getattr(self, i) + self._input_stack.append(d) + + def pop_input(self): + """Restore most recently pushed input parameters (interactive, + self.stdin, linenumber etc). Use this if implementing a + command like read. Push the current input, read the file and + then pop the input to go back to before. + """ + assert (len(self._input_stack)) > 1 + d = self._input_stack.pop() + for k, v in d.items(): + setattr(self, k, v) + + def complete(self, token, state): + """Return a possible completion for readline + + This function is called with state starting at zero to get the + first completion, then one/two/three etc until you return None. The best + implementation is to generate the list when state==0, save it, + and provide members on each increase. + + The default implementation extracts the current full input + from readline and then calls :meth:`complete_command` or + :meth:`complete_sql` as appropriate saving the results for + subsequent calls. + """ + if state == 0: + import readline + # the whole line + line = readline.get_line_buffer() + # beginning and end(+1) of the token in line + beg = readline.get_begidx() + end = readline.get_endidx() + # Are we matching a command? + try: + if self._completion_first and line.startswith("."): + self.completions = self.complete_command(line, token, beg, end) + else: + self.completions = self.complete_sql(line, token, beg, end) + except: + # Readline swallows any exceptions we raise. We + # shouldn't be raising any so this is to catch that + import traceback + traceback.print_exc() + raise + + if state > len(self.completions): + return None + return self.completions[state] + + # Taken from https://sqlite.org/lang_keywords.html + _sqlite_keywords = """ABORT ACTION ADD AFTER ALL ALTER ANALYZE AND AS ASC ATTACH + AUTOINCREMENT BEFORE BEGIN BETWEEN BY CASCADE CASE CAST CHECK + COLLATE COLUMN COMMIT CONFLICT CONSTRAINT CREATE CROSS + CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP DATABASE DEFAULT + DEFERRABLE DEFERRED DELETE DESC DETACH DISTINCT DROP EACH ELSE END + ESCAPE EXCEPT EXCLUSIVE EXISTS EXPLAIN FAIL FOR FOREIGN FROM FULL + GLOB GROUP HAVING IF IGNORE IMMEDIATE IN INDEX INDEXED INITIALLY + INNER INSERT INSTEAD INTERSECT INTO IS ISNULL JOIN KEY LEFT LIKE + LIMIT MATCH NATURAL NO NOT NOTNULL NULL OF OFFSET ON OR ORDER + OUTER PLAN PRAGMA PRIMARY QUERY RAISE RECURSIVE REFERENCES REGEXP + REINDEX RELEASE RENAME REPLACE RESTRICT RIGHT ROLLBACK ROW + SAVEPOINT SELECT SET TABLE TEMP TEMPORARY THEN TO TRANSACTION + TRIGGER UNION UNIQUE UPDATE USING VACUUM VALUES VIEW VIRTUAL WHEN + WHERE WITH WITHOUT""".split() + + _sqlite_keywords.extend(getattr(apsw, "keywords", [])) + + # reserved words need to be quoted. 
Only a subset of the above are reserved + # but what the heck + _sqlite_reserved = _sqlite_keywords + # add a space after each of them except functions which get parentheses + _sqlite_keywords = [x + (" ", "(")[x in ("VALUES", "CAST")] for x in _sqlite_keywords] + + _sqlite_special_names = """_ROWID_ OID ROWID SQLITE_MASTER + SQLITE_SEQUENCE""".split() + + _sqlite_functions = """abs( changes() char( coalesce( glob( ifnull( hex( instr( + last_insert_rowid() length( like( likelihood( likely( + load_extension( lower( ltrim( max( min( nullif( printf( + quote( random() randomblob( replace( round( rtrim( soundex( + sqlite_compileoption_get( sqlite_compileoption_used( + sqlite_source_id() sqlite_version() substr( total_changes() + trim( typeof( unlikely( unicode( upper( zeroblob( date( + time( datetime( julianday( strftime( avg( count( + group_concat( sum( total(""".split() + + _pragmas_bool = ("yes", "true", "on", "no", "false", "off") + _pragmas = { + "application_id": None, + "auto_vacuum=": ("NONE", "FULL", "INCREMENTAL"), + "automatic_index=": _pragmas_bool, + "busy_timeout=": None, + "cache_size=": None, + "case_sensitive_like=": _pragmas_bool, + "cache_spill=": _pragmas_bool, + "cell_size_check=": _pragmas_bool, + "checkpoint_fullfsync=": _pragmas_bool, + "collation_list": None, + "compile_options": None, + "data_version": None, + "database_list": None, + "defer_foreign_keys=": _pragmas_bool, + "encoding=": None, + # ('"UTF-8"', '"UTF-16"', '"UTF-16le"', '"UTF16-16be"'), + # too hard to get " to be part of token just in this special case + "foreign_key_check": None, + "foreign_key_list(": None, + "foreign_keys": _pragmas_bool, + "freelist_count": None, + "fullfsync=": _pragmas_bool, + "ignore_check_constraints": _pragmas_bool, + "incremental_vacuum(": None, + "index_info(": None, + "index_list(": None, + "index_xinfo(": None, + "integrity_check": None, + "journal_mode=": ("DELETE", "TRUNCATE", "PERSIST", "MEMORY", "OFF", "WAL"), + "journal_size_limit=": None, + "legacy_file_format=": _pragmas_bool, + "locking_mode=": ("NORMAL", "EXCLUSIVE"), + "max_page_count=": None, + "mmap_size=": None, + "optimize(": None, + "page_count;": None, + "page_size=": None, + "query_only=": _pragmas_bool, + "quick_check": None, + "read_uncommitted=": _pragmas_bool, + "recursive_triggers=": _pragmas_bool, + "reverse_unordered_selects=": _pragmas_bool, + "schema_version": None, + "secure_delete=": _pragmas_bool, + "shrink_memory": None, + "soft_heap_limit=": None, + "synchronous=": ("OFF", "NORMAL", "FULL"), + "table_info(": None, + "table_list": None, + "temp_store=": ("DEFAULT", "FILE", "MEMORY"), + "threads=": None, + "user_version=": None, + "wal_autocheckpoint=": None, + "wal_checkpoint": None, + "writable_schema": _pragmas_bool, + } + + def _get_prev_tokens(self, line, end): + "Returns the tokens prior to pos end in the line" + return re.findall(r'"?\w+"?', line[:end]) + + def complete_sql(self, line, token, beg, end): + """Provide some completions for SQL + + :param line: The current complete input line + :param token: The word readline is looking for matches + :param beg: Integer offset of token in line + :param end: Integer end of token in line + :return: A list of completions, or an empty list if none + """ + if self._completion_cache is None: + cur = self.db.cursor() + collations = [row[1] for row in cur.execute("pragma collation_list")] + databases = [row[1] for row in cur.execute("pragma database_list")] + other = [] + for db in databases: + if db == "temp": + master = "sqlite_temp_master" + else: 
+ master = "[%s].sqlite_master" % (db, ) + for row in cur.execute("select * from " + master).fetchall(): + for col in (1, 2): + if row[col] not in other and not row[col].startswith("sqlite_"): + other.append(row[col]) + if row[0] == "table": + try: + for table in cur.execute("pragma [%s].table_info([%s])" % ( + db, + row[1], + )).fetchall(): + if table[1] not in other: + other.append(table[1]) + for item in table[2].split(): + if item not in other: + other.append(item) + except apsw.SQLError: + # See https://github.com/rogerbinns/apsw/issues/86 + pass + functions = {} + for row in cur.execute("pragma function_list"): + name = row[0] + narg = row[4] + functions[name] = max(narg, functions.get(name, -1)) + + def fmtfunc(name, nargs): + if nargs == 0: + return name + "()" + return name + "(" + + func_list = [fmtfunc(name, narg) for name, narg in functions.items()] + + self._completion_cache = [ + self._sqlite_keywords, func_list, self._sqlite_special_names, collations, databases, other + ] + for i in range(len(self._completion_cache)): + self._completion_cache[i].sort() + + # be somewhat sensible about pragmas + if "pragma " in line.lower(): + t = self._get_prev_tokens(line.lower(), end) + + # pragma foo = bar + if len(t) > 2 and t[-3] == "pragma": + # t[-2] should be a valid one + for p in self._pragmas: + if p.replace("=", "") == t[-2]: + vals = self._pragmas[p] + if not vals: + return [] + return [x + ";" for x in vals if x.startswith(token)] + # at equals? + if len(t) > 1 and t[-2] == "pragma" and line[:end].replace(" ", "").endswith("="): + for p in self._pragmas: + if p.replace("=", "") == t[-1]: + vals = self._pragmas[p] + if not vals: + return [] + return vals + # pragma foo + if len(t) > 1 and t[-2] == "pragma": + res = [x for x in self._pragmas.keys() if x.startswith(token)] + res.sort() + return res + + # pragma + if len(t) and t[-1] == "pragma": + res = list(self._pragmas.keys()) + res.sort() + return res + + # This is currently not context sensitive (eg it doesn't look + # to see if last token was 'FROM' and hence next should only + # be table names. That is a SMOP like pragmas above + res = [] + ut = token.upper() + for corpus in self._completion_cache: + for word in corpus: + if word.upper().startswith(ut): + # potential match - now match case + if word.startswith(token): # exact + if word not in res: + res.append(word) + elif word.lower().startswith(token): # lower + if word.lower() not in res: + res.append(word.lower()) + elif word.upper().startswith(token): # upper + if word.upper() not in res: + res.append(word.upper()) + else: + # match letter by letter otherwise readline mangles what was typed in + w = token + word[len(token):] + if w not in res: + res.append(w) + return res + + _builtin_commands = None + + def complete_command(self, line, token, beg, end): + """Provide some completions for dot commands + + :param line: The current complete input line + :param token: The word readline is looking for matches + :param beg: Integer offset of token in line + :param end: Integer end of token in line + :return: A list of completions, or an empty list if none + """ + if not self._builtin_commands: + self._builtin_commands = [ + "." + x[len("command_"):] for x in dir(self) if x.startswith("command_") and x != "command_headers" + ] + if beg == 0: + # some commands don't need a space because they take no + # params but who cares? 
+ return [x + " " for x in self._builtin_commands if x.startswith(token)] + return None + + def get_resource_usage(self): + """Return a dict of various numbers (ints or floats). The + .timer command shows the difference between before and after + results of what this returns by calling :meth:`display_timing`""" + if sys.platform == "win32": + import ctypes, time, platform + ctypes.windll.kernel32.GetProcessTimes.argtypes = [ + platform.architecture()[0] == '64bit' and ctypes.c_int64 or ctypes.c_int32, ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p + ] + + # All 4 out params have to be present. FILETIME is really + # just a 64 bit quantity in 100 nanosecond granularity + dummy = ctypes.c_ulonglong() + utime = ctypes.c_ulonglong() + stime = ctypes.c_ulonglong() + rc = ctypes.windll.kernel32.GetProcessTimes( + ctypes.windll.kernel32.GetCurrentProcess(), + ctypes.byref(dummy), # creation time + ctypes.byref(dummy), # exit time + ctypes.byref(stime), + ctypes.byref(utime)) + if rc: + return { + 'Wall clock': time.time(), + 'User time': float(utime.value) / 10000000, + 'System time': float(stime.value) / 10000000 + } + return {} + else: + import resource, time + r = resource.getrusage(resource.RUSAGE_SELF) + res = {'Wall clock': time.time()} + for i, desc in ( + ("utime", "User time"), + ("stime", "System time"), + ("maxrss", "Max rss"), + ("idrss", "Memory"), + ("isrss", "Stack"), + ("ixrss", "Shared Memory"), + ("minflt", "PF (no I/O)"), + ("majflt", "PF (I/O)"), + ("inblock", "Blocks in"), + ("oublock", "Blocks out"), + ("nsignals", "Signals"), + ("nvcsw", "Voluntary context switches"), + ("nivcsw", "Involunary context switches"), + ("msgrcv", "Messages received"), + ("msgsnd", "Messages sent"), + ("nswap", "Swaps"), + ): + f = "ru_" + i + if hasattr(r, f): + res[desc] = getattr(r, f) + return res + + def display_timing(self, b4, after): + """Writes the difference between b4 and after to self.stderr. + The data is dictionaries returned from + :meth:`get_resource_usage`.""" + v = list(b4.keys()) + for i in after: + if i not in v: + v.append(i) + v.sort() + for k in v: + if k in b4 and k in after: + one = b4[k] + two = after[k] + val = two - one + if val: + if type(val) == float: + self.write(self.stderr, "+ %s: %.4f\n" % (k, val)) + else: + self.write(self.stderr, "+ %s: %d\n" % (k, val)) + + ### Colour support + + def _out_colour(self): + # Sets up color for output. Input being interactive doesn't + # matter. This method needs to be called on all changes to + # output. + if getattr(self.stdout, "isatty", False) and self.stdout.isatty(): + self.colour = self._colours[self.colour_scheme] + else: + self.colour = self._colours["off"] + + # This class returns an empty string for all undefined attributes + # so that it doesn't matter if a colour scheme leaves something + # out. 
+ class _colourscheme: + + def __init__(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + + def __nonzero__(self): + return True + + def __str__(self): + return "_colourscheme(" + str(self.__dict__) + ")" + + def __getattr__(self, k): + return "" + + def colour_value(self, val, formatted): + c = self.colour + if val is None: + return self.vnull + formatted + self.vnull_ + if isinstance(val, Shell._basestring): + return self.vstring + formatted + self.vstring_ + if isinstance(val, Shell._binary_type): + return self.vblob + formatted + self.vblob_ + # must be a number - we don't distinguish between float/int + return self.vnumber + formatted + self.vnumber_ + + # The colour definitions - the convention is the name to turn + # something on and the name with an underscore suffix to turn it + # off + d = _colourscheme(**dict([(v, "\x1b[" + str(n) + "m") for n, v in { + 0: "reset", + 1: "bold", + 4: "underline", + 22: "bold_", + 24: "underline_", + 7: "inverse", + 27: "inverse_", + 30: "fg_black", + 31: "fg_red", + 32: "fg_green", + 33: "fg_yellow", + 34: "fg_blue", + 35: "fg_magenta", + 36: "fg_cyan", + 37: "fg_white", + 39: "fg_", + 40: "bg_black", + 41: "bg_red", + 42: "bg_green", + 43: "bg_yellow", + 44: "bg_blue", + 45: "bg_magenta", + 46: "bg_cyan", + 47: "bg_white", + 49: "bg_" + }.items()])) + + _colours = {"off": _colourscheme(colour_value=lambda x, y: y)} + + _colours["default"] = _colourscheme(prompt=d.bold, + prompt_=d.bold_, + error=d.fg_red + d.bold, + error_=d.bold_ + d.fg_, + intro=d.fg_blue + d.bold, + intro_=d.bold_ + d.fg_, + summary=d.fg_blue + d.bold, + summary_=d.bold_ + d.fg_, + header=d.underline, + header_=d.underline_, + vnull=d.fg_red, + vnull_=d.fg_, + vstring=d.fg_yellow, + vstring_=d.fg_, + vblob=d.fg_blue, + vblob_=d.fg_, + vnumber=d.fg_magenta, + vnumber_=d.fg_) + # unpollute namespace + del d + del _colourscheme + try: + del n + del x + del v + except: + pass + + +def main() -> None: + # Docstring must start on second line so dedenting works correctly + """ + Call this to run the :ref:`interactive shell `. It + automatically passes in sys.argv[1:] and exits Python when done. + + """ + try: + s = Shell() + _, _, cmds = s.process_args(sys.argv[1:]) + if len(cmds) == 0: + s.cmdloop() + except: + v = sys.exc_info()[1] + if isinstance(v, SystemExit): + raise + if getattr(v, "_handle_exception_saw_this", False): + pass + else: + # Where did this exception come from? + import traceback + traceback.print_exc() + sys.exit(1) + + +if __name__ == '__main__': + main() diff -Nru python-apsw-3.39.2.0/apsw/speedtest.py python-apsw-3.40.0.0/apsw/speedtest.py --- python-apsw-3.39.2.0/apsw/speedtest.py 1970-01-01 00:00:00.000000000 +0000 +++ python-apsw-3.40.0.0/apsw/speedtest.py 2022-10-18 09:40:42.000000000 +0000 @@ -0,0 +1,530 @@ +#!/usr/bin/env python3 +# +# See the accompanying LICENSE file. +# +# Do speed tests. The tests try to correspond to +# https://sqlite.org/cvstrac/fileview?f=sqlite/tool/mkspeedsql.tcl +# Command line options etc were added later hence the +# somewhat weird structuring. 
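+#
+# Example invocation (assuming apsw is installed as a package):
+#   python3 -m apsw.speedtest --apsw --sqlite3 --scale 5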
+ +import sys +import os +import random +import time +import gc +import optparse + +# Sigh +try: + maxuni = 0x10ffff + chr(maxuni) +except ValueError: + maxuni = 0xffff + +timerfn = time.process_time + + +def doit(): + random.seed(0) + options.tests = [t.strip() for t in options.tests.split(",")] + + print(" Python", sys.executable, sys.version_info) + print(" Scale", options.scale) + print(" Database", options.database) + print(" Tests", ", ".join(options.tests)) + print(" Iterations", options.iterations) + print("Statement Cache", options.scsize) + + print("\n") + if options.apsw: + import apsw + + print(" Testing with APSW file ", apsw.__file__) + print(" APSW version ", apsw.apswversion()) + print(" SQLite lib version ", apsw.sqlitelibversion()) + print(" SQLite headers version ", apsw.SQLITE_VERSION_NUMBER, end="\n\n") + + def apsw_setup(dbfile): + con = apsw.Connection(dbfile, statementcachesize=options.scsize) + con.createscalarfunction("number_name", number_name, 1) + return con + + if options.sqlite3: + import sqlite3 + + print("Testing with sqlite3 file ", sqlite3.__file__) + print(" sqlite3 version ", sqlite3.version) + print(" SQLite version ", sqlite3.sqlite_version, end="\n\n") + + def sqlite3_setup(dbfile): + con = sqlite3.connect(dbfile, isolation_level=None, cached_statements=options.scsize) + con.create_function("number_name", 1, number_name) + return con + + ones = ("zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", + "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen") + tens = ("", "ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety") + + others = ("thousand", "hundred", "zero") + + def _number_name(n): + if n >= 1000: + txt = "%s %s" % (_number_name(int(n / 1000)), others[0]) + n = n % 1000 + else: + txt = "" + + if n >= 100: + txt = txt + " " + ones[int(n / 100)] + " " + others[1] + n = n % 100 + + if n >= 20: + txt = txt + " " + tens[int(n / 10)] + n = n % 10 + + if n > 0: + txt = txt + " " + ones[n] + + txt = txt.strip() + + if txt == "": + txt = others[2] + + return txt + + def unicodify(text): + if options.unicode and len(text): + newt = [] + c = options.unicode / 100.0 + for t in text: + if random.random() > c: + newt.append(t) + continue + while True: + t = random.randint(0xa1, maxuni) + # we don't want the surrogate range or apostrophe + if t < 0xd800 or t > 0xdfff: break + newt.append(chr(t)) + text = "".join(newt) + return text + + if options.unicode: + ones = tuple([unicodify(s) for s in ones]) + tens = tuple([unicodify(s) for s in tens]) + others = tuple([unicodify(s) for s in others]) + + def number_name(n): + text = _number_name(n) + if options.size: + text = text * int(random.randint(0, options.size) / len(text)) + return text + + def getlines(scale=50, bindings=False): + random.seed(0) + + # RogerB added two pragmas so that only memory is used. 
This means that the + # vagaries of disk access times don't alter the results + + # database schema + for i in """PRAGMA page_size=1024; + PRAGMA cache_size=8192; + PRAGMA locking_mode=EXCLUSIVE; + PRAGMA journal_mode = OFF; + PRAGMA temp_store = MEMORY; + CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT); + CREATE TABLE t2(a INTEGER, b INTEGER, c TEXT); + CREATE INDEX i2a ON t2(a); + CREATE INDEX i2b ON t2(b); + SELECT name FROM sqlite_master ORDER BY 1""".split(";"): + yield (i, ) + + # 50,000 inserts on an unindexed table + yield ("BEGIN", ) + for i in range(1, scale * 10000 + 1): + r = random.randint(0, 500000) + if bindings: + yield ("INSERT INTO t1 VALUES(:1, :2, number_name(:2))", (i, r)) + else: + yield ("INSERT INTO t1 VALUES(%d, %d, '%s')" % (i, r, number_name(r)), ) + yield ("COMMIT", ) + + # 50,000 inserts on an indexed table + t1c_list = [] + yield ("BEGIN", ) + for i in range(1, scale * 10000 + 1): + r = random.randint(0, 500000) + x = number_name(r) + t1c_list.append(x) + if bindings: + yield ("INSERT INTO t2 VALUES(:1, :2, number_name(:2))", (i, r)) + else: + yield ("INSERT INTO t2 VALUES(%d, %d, '%s')" % (i, r, x), ) + yield ("COMMIT", ) + + # 50 SELECTs on an integer comparison. There is no index so + # a full table scan is required. + for i in range(scale): + yield ("SELECT count(*), avg(b) FROM t1 WHERE b>=%d AND b<%d" % (i * 100, (i + 10) * 100), ) + + # 50 SELECTs on an LIKE comparison. There is no index so a full + # table scan is required. + for i in range(scale): + yield ("SELECT count(*), avg(b) FROM t1 WHERE c LIKE '%%%s%%'" % (number_name(i), ), ) + + # Create indices + yield ("BEGIN", ) + for i in """CREATE INDEX i1a ON t1(a); + CREATE INDEX i1b ON t1(b); + CREATE INDEX i1c ON t1(c);""".split(";"): + yield (i, ) + yield ("COMMIT", ) + + # 5000 SELECTs on an integer comparison where the integer is + # indexed. + for i in range(scale * 100): + yield ("SELECT count(*), avg(b) FROM t1 WHERE b>=%d AND b<%d" % (i * 100, (i + 10) * 100), ) + + # 100000 random SELECTs against rowid. + for i in range(1, scale * 2000 + 1): + yield ("SELECT c FROM t1 WHERE rowid=%d" % (1 + random.randint(0, 50000), ), ) + + # 100000 random SELECTs against a unique indexed column. + for i in range(1, scale * 2000 + 1): + yield ("SELECT c FROM t1 WHERE a=%d" % (1 + random.randint(0, 50000), ), ) + + # 50000 random SELECTs against an indexed column text column + for i in range(scale * 1000): + if bindings: + yield ( + "SELECT c FROM t1 WHERE c=?", + (random.choice(t1c_list), ), + ) + else: + yield ("SELECT c FROM t1 WHERE c='%s'" % (random.choice(t1c_list), ), ) + + # Vacuum + if options.database != ":memory:": + # opens a disk file + yield ("VACUUM", ) + + # 5000 updates of ranges where the field being compared is indexed. + yield ("BEGIN", ) + for i in range(scale * 100): + yield ("UPDATE t1 SET b=b*2 WHERE a>=%d AND a<%d" % (i * 2, (i + 1) * 2), ) + yield ("COMMIT", ) + + # 50000 single-row updates. An index is used to find the row quickly. + yield ("BEGIN", ) + for i in range(scale * 1000): + if bindings: + yield ("UPDATE t1 SET b=? WHERE a=%d" % (i, ), (random.randint(0, 500000), )) + else: + yield ("UPDATE t1 SET b=%d WHERE a=%d" % (random.randint(0, 500000), i), ) + yield ("COMMIT", ) + + # 1 big text update that touches every row in the table. + yield ("UPDATE t1 SET c=a", ) + + # Many individual text updates. Each row in the table is + # touched through an index. + yield ("BEGIN", ) + for i in range(1, scale * 1000 + 1): + if bindings: + yield ("UPDATE t1 SET c=? 
WHERE a=%d" % (i, ), (number_name(random.randint(0, 500000)), )) + else: + yield ("UPDATE t1 SET c='%s' WHERE a=%d" % (number_name(random.randint(0, 500000)), i), ) + yield ("COMMIT", ) + + # Delete all content in a table. + yield ("DELETE FROM t1", ) + + # Copy one table into another + yield ("INSERT INTO t1 SELECT * FROM t2", ) + + # Delete all content in a table, one row at a time. + yield ("DELETE FROM t1 WHERE 1", ) + + # Refill the table yet again + yield ("INSERT INTO t1 SELECT * FROM t2", ) + + # Drop the table and recreate it without its indices. + yield ("BEGIN", ) + yield ("DROP TABLE t1", ) + yield ("CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT)", ) + yield ("COMMIT", ) + + # Refill the table yet again. This copy should be faster because + # there are no indices to deal with. + yield ("INSERT INTO t1 SELECT * FROM t2", ) + + # The three following used "ORDER BY random()" but we can't do that + # as it causes each run to have different values, and hence different + # amounts of sorting that have to go on. The "random()" has been + # replaced by "c", the column that has the stringified number + + # Select 20000 rows from the table at random. + yield ("SELECT rowid FROM t1 ORDER BY c LIMIT %d" % (scale * 400, ), ) + + # Delete 20000 random rows from the table. + yield (""" DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY c LIMIT %d)""" % (scale * 400, ), ) + + yield ("SELECT count(*) FROM t1", ) + + # Delete 20000 more rows at random from the table. + yield ("""DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY c LIMIT %d)""" % (scale * 400, ), ) + + yield ("SELECT count(*) FROM t1", ) + + # Do a correctness test first + if options.correctness: + print("Correctness test\n") + if 'bigstmt' in options.tests: + text = ";\n".join([x[0] for x in getlines(scale=1)]) + ";" + if 'statements' in options.tests: + withbindings = [line for line in getlines(scale=1, bindings=True)] + if 'statements_nobindings' in options.tests: + withoutbindings = [line for line in getlines(scale=1, bindings=False)] + + res = {} + for driver in ('apsw', 'sqlite3'): + if not getattr(options, driver): + continue + + for test in options.tests: + name = driver + "_" + test + + print(name + '\t') + sys.stdout.flush() + + if name == 'sqlite3_bigstmt': + print('limited functionality (ignoring)\n') + continue + + con = locals().get(driver + "_setup")(":memory:") # we always correctness test on memory + + if test == 'bigstmt': + cursor = con.cursor() + if driver == 'apsw': + func = cursor.execute + else: + func = cursor.executescript + + res[name] = [row for row in func(text)] + print(str(len(res[name])) + "\n") + continue + + cursor = con.cursor() + if test == 'statements': + sql = withbindings + elif test == 'statements_nobindings': + sql = withoutbindings + + l = [] + for s in sql: + for row in cursor.execute(*s): + l.append(row) + + res[name] = l + print(str(len(res[name])) + "\n") + + # All elements of res should be identical + elements = sorted(res.keys()) + for i in range(0, len(elements) - 1): + print("%s == %s %s\n" % (elements[i], elements[i + 1], res[elements[i]] == res[elements[i + 1]])) + + del res + text = None + withbindings = None + withoutbindings = None + + if options.dump_filename or "bigstmt" in options.tests: + text = ";\n".join([x[0] for x in getlines(scale=options.scale)]) + ";" # sqlite3 requires final semicolon + if options.dump_filename: + open(options.dump_filename, "wt", encoding="utf8").write(text) + sys.exit(0) + + if "statements" in options.tests: + withbindings 
= list(getlines(scale=options.scale, bindings=True)) + + if "statements_nobindings" in options.tests: + withoutbindings = list(getlines(scale=options.scale, bindings=False)) + + # Each test returns the amount of time taken. Note that we include + # the close time as well. Otherwise the numbers become a function of + # cache and other collection sizes as freeing members gets deferred to + # close time. + + def apsw_bigstmt(con): + "APSW big statement" + try: + for row in con.execute(text): + pass + except: + import pdb + pdb.set_trace() + pass + + def sqlite3_bigstmt(con): + "sqlite3 big statement" + for row in con.executescript(text): + pass + + def apsw_statements(con, bindings=withbindings): + "APSW individual statements with bindings" + cursor = con.cursor() + for b in bindings: + for row in cursor.execute(*b): + pass + + def sqlite3_statements(con, bindings=withbindings): + "sqlite3 individual statements with bindings" + cursor = con.cursor() + for b in bindings: + for row in cursor.execute(*b): + pass + + def apsw_statements_nobindings(con): + "APSW individual statements without bindings" + return apsw_statements(con, withoutbindings) + + def sqlite3_statements_nobindings(con): + "sqlite3 individual statements without bindings" + return sqlite3_statements(con, withoutbindings) + + # Do the work + print("\nRunning tests - elapsed, CPU (results in seconds, lower is better)\n") + + for i in range(options.iterations): + print("%d/%d" % (i + 1, options.iterations)) + for test in options.tests: + # funky stuff is to alternate order each round + for driver in (("apsw", "sqlite3"), ("sqlite3", "apsw"))[i % 2]: + if getattr(options, driver): + name = driver + "_" + test + func = locals().get(name, None) + if not func: + sys.exit("No such test " + name + "\n") + + if os.path.exists(options.database): + os.remove(options.database) + print("\t" + func.__name__ + (" " * (40 - len(func.__name__))), end="") + con = locals().get(driver + "_setup")(options.database) + gc.collect(2) + b4cpu = timerfn() + b4 = time.time() + func(con) + con.close() # see note above as to why we include this in the timing + gc.collect(2) + after = time.time() + aftercpu = timerfn() + print("%0.3f %0.3f" % (after - b4, aftercpu - b4cpu)) + + +parser = optparse.OptionParser() +parser.add_option("--apsw", dest="apsw", action="store_true", default=False, help="Include apsw in testing (%default)") +parser.add_option("--sqlite3", action="store_true", default=False, help="Include sqlite3 module in testing (%default)") +parser.add_option("--correctness", dest="correctness", action="store_true", default=False, help="Do a correctness test") +parser.add_option( + "--scale", + dest="scale", + type="int", + default=10, + help= + "How many statements to execute. Each unit takes about 2 seconds per test on memory only databases. [Default %default]" +) +parser.add_option("--database", dest="database", default=":memory:", help="The database file to use [Default %default]") +parser.add_option("--tests", + dest="tests", + default="bigstmt,statements,statements_nobindings", + help="What tests to run [Default %default]") +parser.add_option("--iterations", + dest="iterations", + default=4, + type="int", + metavar="N", + help="How many times to run the tests [Default %default]") +parser.add_option("--tests-detail", + dest="tests_detail", + default=False, + action="store_true", + help="Print details of what the tests do. 
(Does not run the tests)")
+parser.add_option("--dump-sql",
+ dest="dump_filename",
+ metavar="FILENAME",
+ help="Name of file to dump SQL to. This is useful for feeding into the SQLite command line shell.")
+parser.add_option(
+ "--sc-size",
+ dest="scsize",
+ type="int",
+ default=100,
+ metavar="N",
+ help=
+ "Size of the statement cache. APSW will disable the cache with a value of zero. sqlite3 ensures a minimum of 5 [Default %default]"
+)
+parser.add_option("--unicode",
+ dest="unicode",
+ type="int",
+ default=0,
+ help="Percentage of text that is non-ascii unicode characters [Default %default]")
+parser.add_option(
+ "--data-size",
+ dest="size",
+ type="int",
+ default=0,
+ metavar="SIZE",
+ help=
+ "Maximum size in characters of data items - keep this number small unless you are on 64 bits and have lots of memory with a small scale - you can easily consume multiple gigabytes [Default same as original TCL speedtest]"
+)
+
+tests_detail = """\
+bigstmt:
+
+ Supplies the SQL as a single string consisting of multiple
+ statements. apsw handles this normally via cursor.execute while
+ sqlite3 requires that cursor.executescript is called. The string
+ will be several kilobytes and with a factor of 50 will be in the
+ megabyte range. This is the kind of query you would run if you were
+ restoring a database from a dump. (Note that sqlite3 silently
+ ignores returned data which also makes it execute faster).
+
+statements:
+
+ Runs the SQL queries but uses bindings (? parameters). eg::
+
+ for i in range(3):
+ cursor.execute("insert into foo values(?)", (i,))
+
+ This test has many hits of the statement cache.
+
+statements_nobindings:
+
+ Runs the SQL queries but doesn't use bindings. eg::
+
+ cursor.execute("insert into foo values(0)")
+ cursor.execute("insert into foo values(1)")
+ cursor.execute("insert into foo values(2)")
+
+ This test has no statement cache hits and shows the overhead of
+ having a statement cache.
+
+ In theory all the tests above should run in almost identical time,
+ as should the SQLite command line shell. This tool shows you what
+ happens in practice.
+ \n"""
+
+if __name__ == "__main__":
+ options, args = parser.parse_args()
+
+ if len(args):
+ parser.error("Unexpected arguments " + str(args))
+
+ if options.tests_detail:
+ print(tests_detail)
+ sys.exit(0)
+
+ if not options.apsw and not options.sqlite3 and not options.dump_filename:
+ parser.error("You should select at least one of --apsw or --sqlite3")
+
+ doit()
diff -Nru python-apsw-3.39.2.0/apsw/tests.py python-apsw-3.40.0.0/apsw/tests.py
--- python-apsw-3.39.2.0/apsw/tests.py 1970-01-01 00:00:00.000000000 +0000
+++ python-apsw-3.40.0.0/apsw/tests.py 2022-11-10 13:54:02.000000000 +0000
@@ -0,0 +1,9262 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# See the accompanying LICENSE file.
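+#
+# Example invocation (assuming apsw is installed as a package; the
+# usual unittest flags such as -v may also work):
+#   python3 -m apsw.tests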
+ +import apsw
+import apsw.shell
+import sys
+import os
+import warnings
+import platform
+import typing
+
+
+def print_version_info():
+ print(" Python ", sys.executable, sys.version_info)
+ print("Testing with APSW file ", apsw.__file__)
+ print(" APSW version ", apsw.apswversion())
+ print(" SQLite lib version ", apsw.sqlitelibversion())
+ print("SQLite headers version ", apsw.SQLITE_VERSION_NUMBER)
+ print(" Using amalgamation ", apsw.using_amalgamation)
+
+
+# sigh
+iswindows = sys.platform in ('win32', )
+
+# prefix for test files (eg if you want it on tmpfs)
+TESTFILEPREFIX = os.environ.get("APSWTESTPREFIX", "")
+
+
+def read_whole_file(name, mode, encoding=None):
+ if "t" in mode and not encoding:
+ encoding = "utf8"
+ if encoding:
+ f = open(name, mode, encoding=encoding)
+ else:
+ f = open(name, mode)
+ try:
+ return f.read()
+ finally:
+ f.close()
+
+
+# encoding is keyword only, and defaults to utf8 for text mode just
+# like read_whole_file above
+def write_whole_file(name, mode, data, *, encoding=None):
+ if "t" in mode and not encoding:
+ encoding = "utf8"
+ if encoding:
+ f = open(name, mode, encoding=encoding)
+ else:
+ f = open(name, mode)
+ try:
+ f.write(data)
+ finally:
+ f.close()
+
+
+# unittest stuff from here on
+
+import unittest
+import math
+import random
+import time
+import threading
+import glob
+import pickle
+import shutil
+import getpass
+import queue
+import traceback
+import re
+import gc
+try:
+ import ctypes
+ import _ctypes
+except:
+ ctypes = None
+ _ctypes = None
+
+# yay
+is64bit = ctypes and ctypes.sizeof(ctypes.c_size_t) >= 8
+
+# Make next switch between the iterator and fetchone alternately
+_realnext = next
+_nextcounter = 0
+
+
+def next(cursor, *args):
+ global _nextcounter
+ _nextcounter += 1
+ if _nextcounter % 2:
+ return _realnext(cursor, *args)
+ res = cursor.fetchone()
+ if res is None:
+ if args:
+ return args[0]
+ return None
+ return res
+
+
+# py3 has a deliberately minimal sys.excepthook, mainly to avoid
+# allocating any memory as the exception could have been caused by
+# running out of memory. So we use our own which says it is an
+# unraiseable exception (with testcode you sometimes can't tell if it
+# is unittest showing you an exception or the unraiseable). It is
+# mainly VFS code that needs to raise these.
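+# For illustration, an exception escaping a destructor is a typical
+# unraiseable (sketch only):
+#
+#   class Boom:
+#       def __del__(self):
+#           raise ValueError("boom")
+#
+#   Boom()  # instance is collected; the hook reports the ValueError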
+def ehook(etype, evalue, etraceback):
+ sys.stderr.write("Unraiseable exception " + str(etype) + ":" + str(evalue) + "\n")
+ traceback.print_tb(etraceback)
+
+
+sys.excepthook = ehook
+
+
+# helper functions
+def randomintegers(howmany):
+ for i in range(howmany):
+ yield (random.randint(0, 9999999999), )
+
+
+def randomstring(length):
+ l = list("abcdefghijklmnopqrstuvwxyz0123456789")
+ while len(l) < length:
+ l.extend(l)
+ l = l[:length]
+ random.shuffle(l)
+ return "".join(l)
+
+
+# An instance of this class is used to get the -1 return value to the
+# C api PyObject_IsTrue
+class BadIsTrue(int):
+
+ def __bool__(self):
+ 1 / 0
+
+
+# helper class - runs code in a separate thread
+class ThreadRunner(threading.Thread):
+
+ def __init__(self, callable, *args, **kwargs):
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.callable = callable
+ self.args = args
+ self.kwargs = kwargs
+ self.q = queue.Queue()
+ self.started = False
+
+ def start(self):
+ if not self.started:
+ self.started = True
+ threading.Thread.start(self)
+
+ def go(self):
+ self.start()
+ t, res = self.q.get()
+ if t: # result
+ return res
+ else: # exception
+ raise res[1].with_traceback(res[2])
+
+ def run(self):
+ try:
+ self.q.put((True, self.callable(*self.args, **self.kwargs)))
+ except:
+ self.q.put((False, sys.exc_info()))
+
+
+# Windows doesn't allow files that are open to be deleted. Even after
+# we close them, tagalongs such as virus scanners, tortoisesvn etc can
+# keep them open. But the good news is that we can rename a file that
+# is in use. This background thread does the background deletions of the
+# renamed files
+def bgdel():
+ q = bgdelq
+ while True:
+ name = q.get()
+ while os.path.exists(name):
+ try:
+ if os.path.isfile(name):
+ os.remove(name)
+ else:
+ shutil.rmtree(name)
+ except:
+ pass
+ if os.path.exists(name):
+ time.sleep(0.1)
+
+
+bgdelq = queue.Queue()
+bgdelthread = threading.Thread(target=bgdel)
+bgdelthread.daemon = True
+bgdelthread.start()
+
+
+def deletefile(name):
+ try:
+ os.remove(name)
+ except:
+ pass
+ l = list("abcdefghijklmn")
+ random.shuffle(l)
+ newname = name + "-n-" + "".join(l)
+ count = 0
+ while os.path.exists(name):
+ count += 1
+ try:
+ os.rename(name, newname)
+ except:
+ if count > 30: # 3 seconds we have been at this!
+ # So give up and give it a stupid name. The sooner
+ # this so called operating system withers into obscurity
+ # the better
+ n = list("abcdefghijklmnopqrstuvwxyz")
+ random.shuffle(n)
+ n = "".join(n)
+ try:
+ os.rename(name, "windowssucks-" + n + ".deletememanually")
+ except:
+ pass
+ break
+ # Make windows happy
+ time.sleep(0.1)
+ gc.collect()
+ if os.path.exists(newname):
+ bgdelq.put(newname)
+ # Give bg thread a chance to run
+ time.sleep(0.1)
+
+
+# Monkey patching FTW
+if not hasattr(unittest.TestCase, "assertTrue"):
+ unittest.TestCase.assertTrue = unittest.TestCase.assert_
+
+openflags = apsw.SQLITE_OPEN_READWRITE | apsw.SQLITE_OPEN_CREATE | apsw.SQLITE_OPEN_URI
+
+
+# main test class/code
+class APSW(unittest.TestCase):
+
+ connection_nargs={ # number of args for function. 
those not listed take zero + 'createaggregatefunction': 2, + 'createcollation': 2, + 'createscalarfunction': 3, + 'collationneeded': 1, + 'setauthorizer': 1, + 'setbusyhandler': 1, + 'setbusytimeout': 1, + 'setcommithook': 1, + 'setprofile': 1, + 'setrollbackhook': 1, + 'setupdatehook': 1, + 'setprogresshandler': 2, + 'enableloadextension': 1, + 'createmodule': 2, + 'filecontrol': 3, + 'setexectrace': 1, + 'setrowtrace': 1, + '__enter__': 0, + '__exit__': 3, + 'backup': 3, + 'wal_autocheckpoint': 1, + 'setwalhook': 1, + 'readonly': 1, + 'db_filename': 1, + 'set_last_insert_rowid': 1, + 'serialize': 1, + 'deserialize': 2, + 'autovacuum_pages': 1, + } + + cursor_nargs = { + 'execute': 1, + 'executemany': 2, + 'setexectrace': 1, + 'setrowtrace': 1, + } + + blob_nargs = {'write': 1, 'read': 1, 'readinto': 1, 'reopen': 1, 'seek': 2} + + def deltempfiles(self): + for name in ("testdb", "testdb2", "testdb3", "testfile", "testfile2", "testdb2x", "test-shell-1", + "test-shell-1.py", "test-shell-in", "test-shell-out", "test-shell-err"): + for i in "-shm", "-wal", "-journal", "": + if os.path.exists(TESTFILEPREFIX + name + i): + deletefile(TESTFILEPREFIX + name + i) + + saved_connection_hooks = [] + + def setUp(self): + # clean out database and journals from last runs + self.saved_connection_hooks.append(apsw.connection_hooks) + gc.collect() + self.deltempfiles() + self.db = apsw.Connection(TESTFILEPREFIX + "testdb", flags=openflags) + self.warnings_filters = warnings.filters + + def tearDown(self): + if self.db is not None: + self.db.close(True) + del self.db + apsw.connection_hooks = self.saved_connection_hooks.pop() # back to original value + gc.collect() + self.deltempfiles() + warnings.filters = self.warnings_filters + getattr(warnings, "_filters_mutated", lambda: True)() + + def suppressWarning(self, name): + if hasattr(__builtins__, name): + warnings.simplefilter("ignore", getattr(__builtins__, name)) + + def assertRaisesRegexCompat(self, etype, pattern, func, *args): + self.assertRaises(etype, func) + + def assertTableExists(self, tablename): + self.assertEqual(next(self.db.cursor().execute("select count(*) from [" + tablename + "]"))[0], 0) + + def assertTableNotExists(self, tablename): + # you get SQLError if the table doesn't exist! + self.assertRaises(apsw.SQLError, self.db.cursor().execute, "select count(*) from [" + tablename + "]") + + def assertTablesEqual(self, dbl, left, dbr, right): + # Ensure tables have the same contents. 
Rowids can be + # different and select gives unordered results so this is + # quite challenging + l = dbl.cursor() + r = dbr.cursor() + # check same number of rows + lcount = l.execute("select count(*) from [" + left + "]").fetchall()[0][0] + rcount = r.execute("select count(*) from [" + right + "]").fetchall()[0][0] + self.assertEqual(lcount, rcount) + # check same number and names and order for columns + lnames = [row[1] for row in l.execute("pragma table_info([" + left + "])")] + rnames = [row[1] for row in r.execute("pragma table_info([" + left + "])")] + self.assertEqual(lnames, rnames) + # read in contents, sort and compare + lcontents = l.execute("select * from [" + left + "]").fetchall() + rcontents = r.execute("select * from [" + right + "]").fetchall() + lcontents.sort(key=lambda x: repr(x)) + rcontents.sort(key=lambda x: repr(x)) + self.assertEqual(lcontents, rcontents) + + def assertRaisesUnraisable(self, exc, func, *args, **kwargs): + return self.baseAssertRaisesUnraisable(True, exc, func, args, kwargs) + + def assertMayRaiseUnraisable(self, exc, func, *args, **kwargs): + """Like assertRaisesUnraiseable but no exception may be raised. + + If one is raised, it must have the expected type. + """ + return self.baseAssertRaisesUnraisable(False, exc, func, args, kwargs) + + def baseAssertRaisesUnraisable(self, must_raise, exc, func, args, kwargs): + orig = sys.excepthook, getattr(sys, "unraisablehook", None) + try: + called = [] + + def ehook(*args): + if len(args) == 1: + t = args[0].exc_type + v = args[0].exc_value + tb = args[0].exc_traceback + else: + t, v, tb = args + called.append((t, v, tb)) + + sys.excepthook = sys.unraisablehook = ehook + + try: + try: + return func(*args, **kwargs) + except: + # This ensures frames have their local variables + # cleared before we put the original excepthook + # back. Clearing the variables results in some + # more SQLite operations which also can raise + # unraisables. traceback.clear_frames was + # introduced in Python 3.4 and unittest was + # updated to call it in assertRaises. See issue + # 164 + if hasattr(traceback, "clear_frames"): + traceback.clear_frames(sys.exc_info()[2]) + raise + finally: + if must_raise and len(called) < 1: + self.fail("Call %s(*%s, **%s) did not do any unraiseable" % (func, args, kwargs)) + if len(called): + self.assertEqual(exc, called[0][0]) # check it was the correct type + finally: + sys.excepthook, sys.unraisablehook = orig + + def testSanity(self): + "Check all parts compiled and are present" + # check some error codes etc are present - picked first middle and last from lists in code + apsw.SQLError + apsw.MisuseError + apsw.NotADBError + apsw.ThreadingViolationError + apsw.BindingsError + apsw.ExecTraceAbort + apsw.SQLITE_FCNTL_SIZE_HINT + apsw.mapping_file_control["SQLITE_FCNTL_SIZE_HINT"] == apsw.SQLITE_FCNTL_SIZE_HINT + apsw.URIFilename + apsw.SQLITE_INDEX_CONSTRAINT_NE # ticket 289 + self.assertTrue(len(apsw.sqlite3_sourceid()) > 10) + + def testModuleExposed(self): + "Check what is exposed and usage" + for name in "Connection", "Cursor", "Blob", "Backup", "zeroblob", "VFS", "VFSFile", "URIFilename": + self.assertTrue(hasattr(apsw, name), "expected name apsw." 
+ name) + + for name in "Blob", "Backup": + self.assertRaisesRegex(TypeError, "cannot create .* instances", getattr(apsw, name)) + + def testConnection(self): + "Test connection opening" + # bad keyword arg + self.assertRaises(TypeError, apsw.Connection, ":memory:", user="nobody") + # wrong types + self.assertRaises(TypeError, apsw.Connection, 3) + # bad file (cwd) + self.assertRaises(apsw.CantOpenError, apsw.Connection, ".") + # bad open flags can't be tested as sqlite accepts them all - ticket #3037 + # self.assertRaises(apsw.CantOpenError, apsw.Connection, "", flags=65535) + + # bad vfs + self.assertRaises(TypeError, apsw.Connection, "foo", vfs=3, flags=-1) + self.assertRaises(apsw.SQLError, apsw.Connection, "foo", vfs="jhjkds") + + def testConnectionFileControl(self): + "Verify sqlite3_file_control" + # Note that testVFS deals with success cases and the actual vfs backend + self.assertRaises(TypeError, self.db.filecontrol, 1, 2) + self.assertRaises(TypeError, self.db.filecontrol, "main", 1001, "foo") + self.assertRaises(OverflowError, self.db.filecontrol, "main", 1001, 45236748972389749283) + self.assertEqual(self.db.filecontrol("main", 1001, 25), False) + + def testConnectionConfig(self): + "Test Connection.config function" + self.assertRaises(TypeError, self.db.config) + self.assertRaises(TypeError, self.db.config, "three") + x = 0x7fffffff + self.assertRaises(OverflowError, self.db.config, x * x * x * x * x) + self.assertRaises(ValueError, self.db.config, 82397) + self.assertRaises(TypeError, self.db.config, apsw.SQLITE_DBCONFIG_ENABLE_FKEY, "banana") + for i in apsw.SQLITE_DBCONFIG_ENABLE_FKEY, apsw.SQLITE_DBCONFIG_ENABLE_TRIGGER, apsw.SQLITE_DBCONFIG_ENABLE_QPSG: + self.assertEqual(1, self.db.config(i, 1)) + self.assertEqual(1, self.db.config(i, -1)) + self.assertEqual(0, self.db.config(i, 0)) + + def testConnectionNames(self): + "Test Connection.db_names" + self.assertRaises(TypeError, self.db.db_names, 3) + expected = ["main", "temp"] + self.assertEqual(expected, self.db.db_names()) + for t in "", APSW.wikipedia_text: + self.db.cursor().execute(f"attach '{ self.db.db_filename('main') }' as '{ t }'") + expected.append(t) + self.assertEqual(expected, self.db.db_names()) + while True: + t = f"{ expected[-1] }-{ len(expected) }" + try: + self.db.cursor().execute(f"attach '{ self.db.db_filename('main') }' as '{ t }'") + except apsw.SQLError: + # SQLError: too many attached databases - max .... 
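+                # (for reference: the ceiling is SQLITE_MAX_ATTACHED, a
+                # compile-time limit defaulting to 10 and capped at 125,
+                # so this loop always terminates)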
+ break + expected.append(t) + self.assertEqual(expected, self.db.db_names()) + while len(expected) > 2: + i = random.randint(2, len(expected) - 1) + self.db.cursor().execute(f"detach '{ expected[i] }'") + del expected[i] + self.assertEqual(expected, self.db.db_names()) + + def testBackwardsCompatibility(self): + "Verifies changed names etc are still accessible through the old ones" + # depends on pep562 which is python 3.7 onwards + if sys.version_info >= (3, 7): + self.assertIs(apsw.main, apsw.shell.main) + self.assertIs(apsw.Shell, apsw.shell.Shell) + + def testCursorFactory(self): + "Test Connection.cursor_factory" + seqbindings = ((3, ), ) * 3 + self.assertEqual(self.db.cursor_factory, apsw.Cursor) + for not_callable in (None, apsw, 3): + try: + self.db.cursor_factory = not_callable + 1 / 0 + except TypeError: + pass + + def error(): + 1 / 0 + + self.db.cursor_factory = error + self.assertRaises(TypeError, self.db.execute, "select 3") + self.assertRaises(TypeError, self.db.executemany, "select 3", seqbindings) + + def error(_): + return 3 + + self.db.cursor_factory = error + self.assertRaises(TypeError, self.db.execute, "select 3") + self.assertRaises(TypeError, self.db.executemany, "select 3") + + class error: + + def __init__(self, _): + pass + + self.db.cursor_factory = error + self.assertRaises(AttributeError, self.db.execute, "select 3") + self.assertRaises(AttributeError, self.db.executemany, "select ?", seqbindings) + + class inherits(apsw.Cursor): + pass + + self.db.cursor_factory = inherits + self.assertEqual(self.db.execute("select 3").fetchall(), self.db.cursor().execute("select 3").fetchall()) + self.assertEqual( + self.db.executemany("select ?", seqbindings).fetchall(), + self.db.cursor().executemany("select ?", seqbindings).fetchall()) + # kwargs + self.assertEqual( + self.db.execute(bindings=tuple(), statements="select 3").fetchall(), + self.db.cursor().execute(bindings=None, statements="select 3").fetchall()) + self.assertEqual( + self.db.executemany(sequenceofbindings=seqbindings, statements="select ?").fetchall(), + self.db.cursor().executemany(statements="select ?", sequenceofbindings=seqbindings).fetchall()) + + # check cursor_factory across closes + class big: + # make the class consume some memory + memory = b"12345678" * 4096 + + db2 = apsw.Connection("") + self.assertEqual(db2.cursor_factory, apsw.Cursor) + db2.cursor_factory = big + self.assertEqual(db2.cursor_factory, big) + db2.close() + # factory becomes None when closing + self.assertIsNone(db2.cursor_factory) + # if this leaks it will show up in memory reports + db2.cursor_factory = big + del big + + def testMemoryLeaks(self): + "MemoryLeaks: Run with a memory profiler such as valgrind and debug Python" + # make and toss away a bunch of db objects, cursors, functions etc - if you use memory profiling then + # simple memory leaks will show up + c = self.db.cursor() + c.execute("create table foo(x)") + vals = [[1], [None], [math.pi], ["kjkljkljl"], [u"\u1234\u345432432423423kjgjklhdfgkjhsdfjkghdfjskh"], + [b"78696ghgjhgjhkgjkhgjhg\xfe\xdf"]] + c.executemany("insert into foo values(?)", vals) + for i in range(MEMLEAKITERATIONS): + db = apsw.Connection(TESTFILEPREFIX + "testdb") + db.createaggregatefunction("aggfunc", lambda x: x) + db.createscalarfunction("scalarfunc", lambda x: x) + db.setbusyhandler(lambda x: False) + db.setbusytimeout(1000) + db.setcommithook(lambda x=1: 0) + db.setrollbackhook(lambda x=2: 1) + db.setupdatehook(lambda x=3: 2) + db.setwalhook(lambda *args: 0) + db.collationneeded(lambda x: 
4) + + def rt1(c, r): + db.setrowtrace(rt2) + return r + + def rt2(c, r): + c.setrowtrace(rt1) + return r + + def et1(c, s, b): + db.setexectrace(et2) + return True + + def et2(c, s, b): + c.setexectrace(et1) + return True + + for i in range(120): + c2 = db.cursor() + c2.setrowtrace(rt1) + c2.setexectrace(et1) + for row in c2.execute("select * from foo" + " " * i): # spaces on end defeat statement cache + pass + del c2 + db.close() + + def testBindings(self): + "Check bindings work correctly" + c = self.db.cursor() + c.execute("create table foo(x,y,z)") + vals = ( + ("(?,?,?)", (1, 2, 3)), + ("(?,?,?)", [1, 2, 3]), + ("(?,?,?)", range(1, 4)), + ("(:a,$b,:c)", { + 'a': 1, + 'b': 2, + 'c': 3 + }), + ("(1,?,3)", (2, )), + ("(1,$a,$c)", { + 'a': 2, + 'b': 99, + 'c': 3 + }), + # some unicode fun + (u"($\N{LATIN SMALL LETTER E WITH CIRCUMFLEX},:\N{LATIN SMALL LETTER A WITH TILDE},$\N{LATIN SMALL LETTER O WITH DIAERESIS})", + (1, 2, 3)), + (u"($\N{LATIN SMALL LETTER E WITH CIRCUMFLEX},:\N{LATIN SMALL LETTER A WITH TILDE},$\N{LATIN SMALL LETTER O WITH DIAERESIS})", + { + u"\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}": 1, + u"\N{LATIN SMALL LETTER A WITH TILDE}": 2, + u"\N{LATIN SMALL LETTER O WITH DIAERESIS}": 3, + })) + + for str, bindings in vals: + c.execute("insert into foo values" + str, bindings) + self.assertEqual(next(c.execute("select * from foo")), (1, 2, 3)) + c.execute("delete from foo") + + # currently missing dict keys come out as null + c.execute("insert into foo values(:a,:b,$c)", {'a': 1, 'c': 3}) # 'b' deliberately missing + self.assertEqual((1, None, 3), next(c.execute("select * from foo"))) + c.execute("delete from foo") + + # these ones should cause errors + vals = ( + (apsw.BindingsError, "(?,?,?)", (1, 2)), # too few + (apsw.BindingsError, "(?,?,?)", (1, 2, 3, 4)), # too many + (apsw.BindingsError, "(?,?,?)", None), # none at all + (apsw.BindingsError, "(?,?,?)", { + 'a': 1 + }), # ? type, dict bindings (note that the reverse will work since all + # named bindings are also implicitly numbered + (TypeError, "(?,?,?)", 2), # not a dict or sequence + (TypeError, "(:a,:b,:c)", { + 'a': 1, + 'b': 2, + 'c': self + }), # bad type for c + ) + for exc, str, bindings in vals: + self.assertRaises(exc, c.execute, "insert into foo values" + str, bindings) + + # with multiple statements + c.execute("insert into foo values(?,?,?); insert into foo values(?,?,?)", (99, 100, 101, 102, 103, 104)) + self.assertRaises(apsw.BindingsError, c.execute, + "insert into foo values(?,?,?); insert into foo values(?,?,?); insert some more", + (100, 100, 101, 1000, 103)) # too few + self.assertRaises(apsw.BindingsError, c.execute, "insert into foo values(?,?,?); insert into foo values(?,?,?)", + (101, 100, 101, 1000, 103, 104, 105)) # too many + # check the relevant statements did or didn't execute as appropriate + self.assertEqual(next(self.db.cursor().execute("select count(*) from foo where x=99"))[0], 1) + self.assertEqual(next(self.db.cursor().execute("select count(*) from foo where x=102"))[0], 1) + self.assertEqual(next(self.db.cursor().execute("select count(*) from foo where x=100"))[0], 1) + self.assertEqual(next(self.db.cursor().execute("select count(*) from foo where x=1000"))[0], 0) + self.assertEqual(next(self.db.cursor().execute("select count(*) from foo where x=101"))[0], 1) + self.assertEqual(next(self.db.cursor().execute("select count(*) from foo where x=105"))[0], 0) + + # check there are some bindings! 
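+        # (for reference: SQLite accepts the placeholder styles ?, ?NNN,
+        # :name, $name and @name, of which ?, :name and $name appear above;
+        # the next line checks the no-bindings-at-all case)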
+ self.assertRaises(apsw.BindingsError, c.execute, "create table bar(x,y,z);insert into bar values(?,?,?)") + + # across executemany + vals = ((1, 2, 3), (4, 5, 6), (7, 8, 9)) + c.executemany("insert into foo values(?,?,?);", vals) + for x, y, z in vals: + self.assertEqual(next(c.execute("select * from foo where x=?", (x, ))), (x, y, z)) + + # with an iterator + def myvals(): + for i in range(10): + yield {'a': i, 'b': i * 10, 'c': i * 100} + + c.execute("delete from foo") + c.executemany("insert into foo values($a,:b,$c)", myvals()) + c.execute("delete from foo") + + # errors for executemany + self.assertRaises(TypeError, c.executemany, "statement", 12, 34, 56) # incorrect num params + self.assertRaises(TypeError, c.executemany, "statement", 12) # wrong type + self.assertRaises(apsw.SQLError, c.executemany, "syntax error", [(1, )]) # error in prepare + + def myiter(): + yield 1 / 0 + + self.assertRaises(ZeroDivisionError, c.executemany, "statement", myiter()) # immediate error in iterator + + def myiter(): + yield self + + self.assertRaises(TypeError, c.executemany, "statement", myiter()) # immediate bad type + self.assertRaises(TypeError, c.executemany, "select ?", ((self, ), (1))) # bad val + c.executemany("statement", ()) # empty sequence + + # error in iterator after a while + def myvals(): + for i in range(2): + yield {'a': i, 'b': i * 10, 'c': i * 100} + 1 / 0 + + self.assertRaises(ZeroDivisionError, c.executemany, "insert into foo values($a,:b,$c)", myvals()) + self.assertEqual(next(c.execute("select count(*) from foo"))[0], 2) + c.execute("delete from foo") + + # return bad type from iterator after a while + def myvals(): + for i in range(2): + yield {'a': i, 'b': i * 10, 'c': i * 100} + yield self + + self.assertRaises(TypeError, c.executemany, "insert into foo values($a,:b,$c)", myvals()) + self.assertEqual(next(c.execute("select count(*) from foo"))[0], 2) + c.execute("delete from foo") + + # some errors in executemany + self.assertRaises(apsw.BindingsError, c.executemany, "insert into foo values(?,?,?)", ((1, 2, 3), (1, 2, 3, 4))) + self.assertRaises(apsw.BindingsError, c.executemany, "insert into foo values(?,?,?)", ((1, 2, 3), (1, 2))) + + # incomplete execution across executemany + c.executemany("select * from foo; select ?", ((1, ), (2, ))) # we don't read + self.assertRaises(apsw.IncompleteExecutionError, c.executemany, "begin") + + # set type (pysqlite error with this) + c.execute("create table xxset(x,y,z)") + c.execute("insert into xxset values(?,?,?)", set((1, 2, 3))) + c.executemany("insert into xxset values(?,?,?)", (set((4, 5, 6)), )) + result = [(1, 2, 3), (4, 5, 6)] + for i, v in enumerate(c.execute("select * from xxset order by x")): + self.assertEqual(v, result[i]) + + def testCursor(self): + "Check functionality of the cursor" + c = self.db.cursor() + # shouldn't be able to manually create + self.assertRaises(TypeError, apsw.Cursor) + self.assertRaises(TypeError, apsw.Cursor, 3) + self.assertRaises(TypeError, apsw.Cursor, c) + + class consub(apsw.Connection): + pass + + con2 = consub("") + assert isinstance(con2, apsw.Connection) and not type(con2) == apsw.Connection + apsw.Cursor(con2) + + # give bad params + self.assertRaises(TypeError, c.execute) + self.assertRaises(TypeError, c.execute, "foo", "bar", "bam") + + # empty statements + c.execute("") + c.execute(" ;\n\t\r;;") + + # unicode + self.assertEqual(3, next(c.execute(u"select 3"))[0]) + + # does it work? 
+ c.execute("create table foo(x,y,z)") + # table should be empty + entry = -1 + for entry, values in enumerate(c.execute("select * from foo")): + pass + self.assertEqual(entry, -1, "No rows should have been returned") + # add ten rows + for i in range(10): + c.execute("insert into foo values(1,2,3)") + for entry, values in enumerate(c.execute("select * from foo")): + # check we get back out what we put in + self.assertEqual(values, (1, 2, 3)) + self.assertEqual(entry, 9, "There should have been ten rows") + # does getconnection return the right object + self.assertIs(c.getconnection(), self.db) + self.assertIs(c.connection, self.db) + self.assertIs(c.connection, c.getconnection()) + # check getdescription - note column with space in name and [] syntax to quote it + cols = ( + ("x a space", "INTEGER"), + ("y", "TEXT"), + ("z", "foo"), + ("a", "char"), + (u"\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}\N{LATIN SMALL LETTER A WITH TILDE}", + u"\N{LATIN SMALL LETTER O WITH DIAERESIS}\N{LATIN SMALL LETTER U WITH CIRCUMFLEX}"), + ) + c.execute("drop table foo; create table foo (%s)" % (", ".join(["[%s] %s" % (n, t) for n, t in cols]), )) + c.execute("insert into foo([x a space]) values(1)") + c.execute( + "create temp table two(fred banana); insert into two values(7); create temp view three as select fred as [a space] from two" + ) + c.execute("select 3") # see issue #370 + has_full = any(o == "ENABLE_COLUMN_METADATA" or o.startswith("ENABLE_COLUMN_METADATA=") + for o in apsw.compile_options) if apsw.using_amalgamation else hasattr(c, "description_full") + for row in c.execute("select * from foo"): + self.assertEqual(cols, c.getdescription()) + self.assertEqual(has_full, hasattr(c, "description_full")) + self.assertEqual(cols, tuple([d[:2] for d in c.description])) + self.assertEqual((None, None, None, None, None), c.description[0][2:]) + self.assertEqual(list(map(len, c.description)), [7] * len(cols)) + if has_full: + for row in c.execute("select * from foo join three"): + self.assertEqual(c.description_full, + (('x a space', 'INTEGER', 'main', 'foo', 'x a space'), + ('y', 'TEXT', 'main', 'foo', 'y'), ('z', 'foo', 'main', 'foo', 'z'), + ('a', 'char', 'main', 'foo', 'a'), ('êã', 'öû', 'main', 'foo', 'êã'), + ('a space', 'banana', 'temp', 'two', 'fred'))) + # check description caching isn't broken + cols2 = cols[1:4] + for row in c.execute("select y,z,a from foo"): + self.assertEqual(cols2, c.getdescription()) + self.assertEqual(cols2, tuple([d[:2] for d in c.description])) + self.assertEqual((None, None, None, None, None), c.description[0][2:]) + self.assertEqual(list(map(len, c.description)), [7] * len(cols2)) + # execution is complete ... 
+ self.assertRaises(apsw.ExecutionCompleteError, c.getdescription) + self.assertRaises(apsw.ExecutionCompleteError, lambda: c.description) + if has_full: + self.assertRaises(apsw.ExecutionCompleteError, lambda: c.description_full) + self.assertRaises(StopIteration, lambda xx=0: _realnext(c)) + self.assertRaises(StopIteration, lambda xx=0: _realnext(c)) + # fetchone is used throughout, check end behaviour + self.assertEqual(None, c.fetchone()) + self.assertEqual(None, c.fetchone()) + self.assertEqual(None, c.fetchone()) + # nulls for getdescription + for row in c.execute("pragma user_version"): + self.assertEqual(c.getdescription(), (('user_version', None), )) + # incomplete + c.execute("select * from foo; create table bar(x)") # we don't bother reading leaving + self.assertRaises(apsw.IncompleteExecutionError, c.execute, "select * from foo") # execution incomplete + self.assertTableNotExists("bar") + # autocommit + self.assertEqual(True, self.db.getautocommit()) + c.execute("begin immediate") + self.assertEqual(False, self.db.getautocommit()) + # pragma + c.execute("pragma user_version") + c.execute("pragma pure=nonsense") + # error + self.assertRaises(apsw.SQLError, c.execute, + "create table bar(x,y,z); this is a syntax error; create table bam(x,y,z)") + self.assertTableExists("bar") + self.assertTableNotExists("bam") + # fetchall + self.assertEqual(c.fetchall(), []) + self.assertEqual(c.execute("select 3; select 4").fetchall(), [(3, ), (4, )]) + # readonly, explain & expanded_sql attributes + res = None + def tracer(cur, query, bindings): + nonlocal res + res = {"cursor": cur, "query": query, "bindings": bindings, "readonly": cur.is_readonly, "explain": cur.is_explain} + return True + self.assertIsNone(c.exectrace) + c.setexectrace(tracer) + self.assertIs(c.exectrace, tracer) + c.execute("pragma user_version") + self.assertIs(res["cursor"], c) + self.assertTrue(res["readonly"]) + self.assertEqual(res["explain"], 0) + c.execute("explain pragma user_version") + self.assertEqual(res["explain"], 1) + c.execute("explain query plan select 3") + self.assertEqual(res["explain"], 2) + c.execute("pragma user_version=42") + self.assertFalse(res["readonly"]) + biggy="9" * 24 * 1024 + ran = False + for row in c.execute("select ?,?", (biggy, biggy)): + ran = True + self.assertEqual(f"select '{ biggy }','{ biggy }'", c.expanded_sql) + existing = self.db.limit(apsw.SQLITE_LIMIT_LENGTH, 25 * 1024) + self.assertIsNone(c.expanded_sql) + self.db.limit(apsw.SQLITE_LIMIT_LENGTH, existing) + self.assertTrue(ran) + # keyword args + c.execute("pragma user_version=73", bindings=None, can_cache=False, prepare_flags=0).fetchall() + c.executemany(statements="select ?", sequenceofbindings=((1,), (2,)), can_cache=False, prepare_flags=0).fetchall() + + def testIssue373(self): + "issue 373: dict type checking in bindings" + import collections.abc + + class not_a_dict: + pass + + class dict_lookalike(collections.abc.Mapping): + def __getitem__(self, _): + return 99 + + def __iter__(*args): + raise NotImplementedError + + def __len__(*args): + raise NotImplementedError + + class errors_be_here: + def __instancecheck__(self, _): + 1/0 + def __subclasscheck__(self, _): + 1/0 + + class dict_with_error: + def __getitem__(self, _): + 1/0 + + collections.abc.Mapping.register(dict_with_error) + + class coerced_to_list: + # this is not registered as dict, and instead PySequence_Fast will + # turn it into a list calling the method for each key + def __getitem__(self, key): + if key < 10: + return key + 1/0 + + class 
dict_subclass(dict): + pass + + self.assertRaises(TypeError, self.db.execute, "select :name", not_a_dict()) + self.assertEqual([(99,)], self.db.execute("select :name", dict_lookalike()).fetchall()) + # make sure these aren't detected as dict + for thing in (1,), {1}, [1]: + self.assertRaises(TypeError, self.db.execute("select :name", thing)) + + self.assertRaises(TypeError, self.db.execute, "select :name", errors_be_here()) + self.assertRaises(ZeroDivisionError, self.db.execute, "select :name", dict_with_error()) + self.assertEqual([(None,)], self.db.execute("select :name", {}).fetchall()) + self.assertEqual([(None,)], self.db.execute("select :name", dict_subclass()).fetchall()) + self.assertRaises(ZeroDivisionError, self.db.execute, "select ?", coerced_to_list()) + + # same tests with executemany + self.assertRaises(TypeError, self.db.executemany, "select :name", (not_a_dict(),)) + self.assertEqual([(99,)], self.db.executemany("select :name", [dict_lookalike()]).fetchall()) + # make sure these aren't detected as dict + for thing in (1,), {1}, [1]: + self.assertRaises(TypeError, self.db.executemany("select :name", [thing])) + + self.assertRaises(TypeError, self.db.executemany, "select :name", errors_be_here()) + self.assertRaises(ZeroDivisionError, self.db.executemany, "select :name", dict_with_error()) + self.assertEqual([(None,)], self.db.executemany("select :name", ({},)).fetchall()) + self.assertEqual([(None,)], self.db.executemany("select :name", [dict_subclass()]).fetchall()) + self.assertRaises(ZeroDivisionError, self.db.executemany, "select ?", (coerced_to_list(),)) + + def testIssue376(self): + "Whitespace treated as incomplete execution" + c = self.db.cursor() + for statement in ( + "select 3", + "select 3;", + "select 3; ", + "select 3; ;\t\r\n; ", + ): + c.execute(statement) + # should not throw incomplete + c.execute("select 4") + self.assertEqual([(3,), (4,)], c.execute(statement + "; select 4").fetchall()) + + + def testTypes(self): + "Check type information is maintained" + c = self.db.cursor() + c.execute("create table foo(row,x)") + + vals = test_types_vals + + for i, v in enumerate(vals): + c.execute("insert into foo values(?,?)", (i, v)) + + # add function to test conversion back as well + def snap(*args): + return args[0] + + self.db.createscalarfunction("snap", snap) + + # now see what we got out + count = 0 + for row, v, fv in c.execute("select row,x,snap(x) from foo"): + count += 1 + if type(vals[row]) is float: + self.assertAlmostEqual(vals[row], v) + self.assertAlmostEqual(vals[row], fv) + else: + self.assertEqual(vals[row], v) + self.assertEqual(vals[row], fv) + self.assertEqual(count, len(vals)) + + # check some out of bounds conditions + # integer greater than signed 64 quantity (SQLite only supports up to that) + self.assertRaises(OverflowError, c.execute, "insert into foo values(9999,?)", (922337203685477580799, )) + self.assertRaises(OverflowError, c.execute, "insert into foo values(9999,?)", (-922337203685477580799, )) + + # not valid types for SQLite + self.assertRaises(TypeError, c.execute, "insert into foo values(9999,?)", (apsw, )) # a module + self.assertRaises(TypeError, c.execute, "insert into foo values(9999,?)", (type, )) # type + self.assertRaises(TypeError, c.execute, "insert into foo values(9999,?)", (dir, )) # function + + # check nothing got inserted + self.assertEqual(0, next(c.execute("select count(*) from foo where row=9999"))[0]) + + def testFormatSQLValue(self): + "Verify text formatting of values" + wt = APSW.wikipedia_text + vals = ( + 
(3, "3"), + (3.1, "3.1"), + (-3, "-3"), + (-3.1, "-3.1"), + (9223372036854775807, "9223372036854775807"), + (-9223372036854775808, "-9223372036854775808"), + (None, "NULL"), + ("ABC", "'ABC'"), + (u"\N{BLACK STAR} \N{WHITE STAR} \N{LIGHTNING} \N{COMET} ", + "'" + u"\N{BLACK STAR} \N{WHITE STAR} \N{LIGHTNING} \N{COMET} " + "'"), + (u"\N{BLACK STAR} \N{WHITE STAR} ' \N{LIGHTNING} \N{COMET} ", + "'" + u"\N{BLACK STAR} \N{WHITE STAR} '' \N{LIGHTNING} \N{COMET} " + "'"), + ("", "''"), + ("'", "''''"), + ("'a", "'''a'"), + ("a'", "'a'''"), + ("''", "''''''"), + ("'" * 20000, "'" + "'" * 40000 + "'"), + ("\0", "''||X'00'||''"), + ("\0\0\0", "''||X'00'||''||X'00'||''||X'00'||''"), + ("AB\0C", "'AB'||X'00'||'C'"), + ("A'B'\0C", "'A''B'''||X'00'||'C'"), + ("\0A'B", "''||X'00'||'A''B'"), + ("A'B\0", "'A''B'||X'00'||''"), + (b"ABDE\0C", "X'414244450043'"), + (b"", "X''"), + (wt, "'" + wt + "'"), + (wt[:77] + "'" + wt[77:], "'" + wt[:77] + "''" + wt[77:] + "'"), + ) + for vin, vout in vals: + out = apsw.format_sql_value(vin) + self.assertEqual(out, vout) + # Errors + self.assertRaises(TypeError, apsw.format_sql_value, apsw) + self.assertRaises(TypeError, apsw.format_sql_value) + + def testWAL(self): + "Test WAL functions" + # note that it is harmless calling wal functions on a db not in wal mode + self.assertRaises(TypeError, self.db.wal_autocheckpoint) + self.assertRaises(TypeError, self.db.wal_autocheckpoint, "a strinbg") + self.db.wal_autocheckpoint(8912) + self.assertRaises(TypeError, self.db.wal_checkpoint, -1) + self.db.wal_checkpoint() + self.db.wal_checkpoint("main") + v = self.db.wal_checkpoint(mode=apsw.SQLITE_CHECKPOINT_PASSIVE) + self.assertTrue(isinstance(v, tuple) and len(v) == 2 and isinstance(v[0], int) and isinstance(v[1], int)) + self.assertRaises(apsw.MisuseError, self.db.wal_checkpoint, mode=876786) + self.assertRaises(TypeError, self.db.setwalhook) + self.assertRaises(TypeError, self.db.setwalhook, 12) + self.db.setwalhook(None) + # check we can set wal mode + self.assertEqual("wal", self.db.cursor().execute("pragma journal_mode=wal").fetchall()[0][0]) + + # errors in wal callback + def zerodiv(*args): + 1 / 0 + + self.db.setwalhook(zerodiv) + self.assertRaises(ZeroDivisionError, self.db.cursor().execute, "create table one(x)") + # the error happens after the wal commit so the table should exist + self.assertTableExists("one") + + def badreturn(*args): + return "three" + + self.db.setwalhook(badreturn) + self.assertRaises(TypeError, self.db.cursor().execute, "create table two(x)") + self.assertTableExists("two") + + expectdbname = "" + + def walhook(conn, dbname, pages): + self.assertTrue(conn is self.db) + self.assertTrue(pages > 0) + self.assertEqual(dbname, expectdbname) + return apsw.SQLITE_OK + + expectdbname = "main" + self.db.setwalhook(walhook) + self.db.cursor().execute("create table three(x)") + self.db.cursor().execute("attach '%stestdb2?psow=0' as fred" % ("file:" + TESTFILEPREFIX, )) + self.assertEqual("wal", self.db.cursor().execute("pragma fred.journal_mode=wal").fetchall()[0][0]) + expectdbname = "fred" + self.db.cursor().execute("create table fred.three(x)") + + def testAuthorizer(self): + "Verify the authorizer works" + retval = apsw.SQLITE_DENY + + def authorizer(operation, paramone, paramtwo, databasename, triggerorview): + # we fail creates of tables starting with "private" + if operation == apsw.SQLITE_CREATE_TABLE and paramone.startswith("private"): + return retval + return apsw.SQLITE_OK + + c = self.db.cursor() + # this should succeed + c.execute("create 
table privateone(x)") + # this should fail + self.assertRaises(TypeError, self.db.setauthorizer, 12) # must be callable + self.assertRaises(TypeError, setattr, self.db, "authorizer", 12) + self.db.setauthorizer(authorizer) + self.assertIs(self.db.authorizer, authorizer) + for val in apsw.SQLITE_DENY, apsw.SQLITE_DENY, 0x800276889000212112: + retval = val + if val < 100: + self.assertRaises(apsw.AuthError, c.execute, "create table privatetwo(x)") + else: + self.assertRaises(OverflowError, c.execute, "create table privatetwo(x)") + # this should succeed + self.db.setauthorizer(None) + self.assertIsNone(self.db.authorizer) + c.execute("create table privatethree(x)") + + self.assertTableExists("privateone") + self.assertTableNotExists("privatetwo") + self.assertTableExists("privatethree") + + # error in callback + def authorizer(operation, *args): + if operation == apsw.SQLITE_CREATE_TABLE: + 1 / 0 + return apsw.SQLITE_OK + + self.db.authorizer = authorizer + self.assertRaises(ZeroDivisionError, c.execute, "create table shouldfail(x)") + self.assertTableNotExists("shouldfail") + + # bad return type in callback + def authorizer(operation, *args): + return "a silly string" + + self.db.setauthorizer(authorizer) + self.assertRaises(TypeError, c.execute, "create table shouldfail(x); select 3+5") + self.db.authorizer = None # otherwise next line will fail! + self.assertTableNotExists("shouldfail") + + # back to normal + self.db.authorizer = None + c.execute("create table shouldsucceed(x)") + self.assertTableExists("shouldsucceed") + + def testExecTracing(self): + "Verify tracing of executed statements and bindings" + self.db.setexectrace(None) + self.assertIsNone(self.db.exectrace) + self.db.exectrace = None + self.assertIsNone(self.db.exectrace) + c = self.db.cursor() + cmds = [] # this is maniulated in tracefunc + + def tracefunc(cursor, cmd, bindings): + cmds.append((cmd, bindings)) + return True + + c.execute("create table one(x,y,z)") + self.assertEqual(len(cmds), 0) + self.assertRaises(TypeError, c.setexectrace, 12) # must be callable + self.assertRaises(TypeError, self.db.setexectrace, 12) # must be callable + c.setexectrace(tracefunc) + self.assertIs(c.exectrace, tracefunc) + statements = [ + ("insert into one values(?,?,?)", (1, 2, 3)), + ("insert into one values(:a,$b,$c)", { + 'a': 1, + 'b': "string", + 'c': None + }), + ] + for cmd, values in statements: + c.execute(cmd, values) + self.assertEqual(cmds, statements) + self.assertTrue(c.getexectrace() is tracefunc) + c.exectrace = None + self.assertTrue(c.getexectrace() is None) + c.execute("create table bar(x,y,z)") + # cmds should be unchanged + self.assertEqual(cmds, statements) + # tracefunc can abort execution + count = next(c.execute("select count(*) from one"))[0] + + def tracefunc(cursor, cmd, bindings): + return False # abort + + c.setexectrace(tracefunc) + self.assertRaises(apsw.ExecTraceAbort, c.execute, "insert into one values(1,2,3)") + # table should not have been modified + c.setexectrace(None) + self.assertEqual(count, next(c.execute("select count(*) from one"))[0]) + + # error in tracefunc + def tracefunc(cursor, cmd, bindings): + 1 / 0 + + c.setexectrace(tracefunc) + self.assertRaises(ZeroDivisionError, c.execute, "insert into one values(1,2,3)") + c.setexectrace(None) + self.assertEqual(count, next(c.execute("select count(*) from one"))[0]) + # test across executemany and multiple statements + counter = [0] + + def tracefunc(cursor, cmd, bindings): + counter[0] = counter[0] + 1 + return True + + c.setexectrace(tracefunc) 
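+        # (note: the tracer fires once per statement, so the five statements
+        # below - one create plus four inserts - should give five calls, and
+        # the executemany after that re-runs its two statements for each of
+        # five binding rows, giving ten)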
+ c.execute( + "create table two(x);insert into two values(1); insert into two values(2); insert into two values(?); insert into two values(?)", + (3, 4)) + self.assertEqual(counter[0], 5) + counter[0] = 0 + c.executemany("insert into two values(?); insert into two values(?)", [[n, n + 1] for n in range(5)]) + self.assertEqual(counter[0], 10) + # error in func but only after a while + c.execute("delete from two") + counter[0] = 0 + + def tracefunc(cursor, cmd, bindings): + counter[0] = counter[0] + 1 + if counter[0] > 3: + 1 / 0 + return True + + c.setexectrace(tracefunc) + self.assertRaises( + ZeroDivisionError, c.execute, + "insert into two values(1); insert into two values(2); insert into two values(?); insert into two values(?)", + (3, 4)) + self.assertEqual(counter[0], 4) + c.setexectrace(None) + # check the first statements got executed + self.assertEqual(3, next(c.execute("select max(x) from two"))[0]) + + # executemany + def tracefunc(cursor, cmd, bindings): + 1 / 0 + + c.setexectrace(tracefunc) + self.assertRaises(ZeroDivisionError, c.executemany, "select ?", [(1, )]) + c.setexectrace(None) + + # tracefunc with wrong number of arguments + def tracefunc(a, b, c, d, e, f): + 1 / 0 + + c.setexectrace(tracefunc) + self.assertRaises(TypeError, c.execute, "select max(x) from two") + + def tracefunc(*args): + return BadIsTrue() + + c.setexectrace(tracefunc) + self.assertRaises(ZeroDivisionError, c.execute, "select max(x) from two") + # connection based tracing + self.assertEqual(self.db.getexectrace(), None) + traced = [False, False] + + def contrace(*args): + traced[0] = True + return True + + def curtrace(*args): + traced[1] = True + return True + + c.setexectrace(curtrace) + c.execute("select 3") + self.assertEqual(traced, [False, True]) + traced = [False, False] + self.db.setexectrace(contrace) + c.execute("select 3") + self.assertEqual(traced, [False, True]) + traced = [False, False] + c.setexectrace(None) + c.execute("select 3") + self.assertEqual(traced, [True, False]) + traced = [False, False] + self.db.cursor().execute("select 3") + self.assertEqual(traced, [True, False]) + self.assertEqual(self.db.getexectrace(), contrace) + self.assertEqual(c.getexectrace(), None) + self.assertEqual(self.db.cursor().getexectrace(), None) + c.setexectrace(curtrace) + self.assertEqual(c.getexectrace(), curtrace) + + def testRowTracing(self): + "Verify row tracing" + self.db.setrowtrace(None) + c = self.db.cursor() + c.execute("create table foo(x,y,z)") + vals = (1, 2, 3) + c.execute("insert into foo values(?,?,?)", vals) + + def tracefunc(cursor, row): + return tuple([7 for i in row]) + + # should get original row back + self.assertEqual(next(c.execute("select * from foo")), vals) + self.assertRaises(TypeError, c.setrowtrace, 12) # must be callable + c.setrowtrace(tracefunc) + self.assertTrue(c.getrowtrace() is tracefunc) + # all values replaced with 7 + self.assertEqual(next(c.execute("select * from foo")), tuple([7] * len(vals))) + + def tracefunc(cursor, row): + return (7, ) + + # a single 7 + c.setrowtrace(tracefunc) + self.assertEqual(next(c.execute("select * from foo")), (7, )) + # no alteration again + c.setrowtrace(None) + self.assertEqual(next(c.execute("select * from foo")), vals) + + # error in function + def tracefunc(*result): + 1 / 0 + + c.setrowtrace(tracefunc) + try: + for row in c.execute("select * from foo"): + self.fail("Should have had exception") + break + except ZeroDivisionError: + pass + c.setrowtrace(None) + self.assertEqual(next(c.execute("select * from foo")), vals) + 
# returning null + c.execute("create table bar(x)") + c.executemany("insert into bar values(?)", [[x] for x in range(10)]) + counter = [0] + + def tracefunc(cursor, args): + counter[0] = counter[0] + 1 + if counter[0] % 2: + return None + return args + + c.setrowtrace(tracefunc) + countertoo = 0 + for row in c.execute("select * from bar"): + countertoo += 1 + c.setrowtrace(None) + self.assertEqual(countertoo, 5) # half the rows should be skipped + # connection based + self.assertRaises(TypeError, self.db.setrowtrace, 12) + self.assertEqual(self.db.getrowtrace(), None) + traced = [False, False] + + def contrace(cursor, row): + traced[0] = True + return row + + def curtrace(cursor, row): + traced[1] = True + return row + + for row in c.execute("select 3,3"): + pass + self.assertEqual(traced, [False, False]) + traced = [False, False] + self.db.setrowtrace(contrace) + for row in self.db.cursor().execute("select 3,3"): + pass + self.assertEqual(traced, [True, False]) + traced = [False, False] + c.setrowtrace(curtrace) + for row in c.execute("select 3,3"): + pass + self.assertEqual(traced, [False, True]) + traced = [False, False] + c.setrowtrace(None) + for row in c.execute("select 3"): + pass + self.assertEqual(traced, [True, False]) + self.assertEqual(self.db.getrowtrace(), contrace) + + def testScalarFunctions(self): + "Verify scalar functions" + c = self.db.cursor() + + def ilove7(*args): + return 7 + + self.assertRaises(TypeError, self.db.createscalarfunction, "twelve", 12) # must be callable + self.assertRaises(TypeError, self.db.createscalarfunction, "twelve", 12, 27, 28) # too many params + try: + self.db.createscalarfunction("twelve", ilove7, 900) # too many args + except (apsw.SQLError, apsw.MisuseError): + # https://sqlite.org/cvstrac/tktview?tn=3875 + pass + # some unicode fun + self.db.createscalarfunction, u"twelve\N{BLACK STAR}", ilove7 + try: + # SQLite happily registers the function, but you can't + # call it + self.assertEqual(c.execute("select " + u"twelve\N{BLACK STAR}" + "(3)").fetchall(), [[7]]) + except apsw.SQLError: + pass + + self.db.createscalarfunction("seven", ilove7) + c.execute("create table foo(x,y,z)") + for i in range(10): + c.execute("insert into foo values(?,?,?)", (i, i, i)) + for i in range(10): + self.assertEqual((7, ), next(c.execute("select seven(x,y,z) from foo where x=?", (i, )))) + # clear func + self.assertRaises(apsw.BusyError, self.db.createscalarfunction, "seven", + None) # active select above so no funcs can be changed + for row in c.execute("select null"): + pass # no active sql now + self.db.createscalarfunction("seven", None) + # function names are limited to 255 characters - SQLerror is the rather unintuitive error return + try: + self.db.createscalarfunction("a" * 300, ilove7) + except (apsw.SQLError, apsw.MisuseError): + pass # see sqlite ticket #3875 + # have an error in a function + def badfunc(*args): + return 1 / 0 + + self.db.createscalarfunction("badscalarfunc", badfunc) + self.assertRaises(ZeroDivisionError, c.execute, "select badscalarfunc(*) from foo") + # return non-allowed types + for v in ({'a': 'dict'}, ['a', 'list'], self): + + def badtype(*args): + return v + + self.db.createscalarfunction("badtype", badtype) + self.assertRaises(TypeError, c.execute, "select badtype(*) from foo") + # return non-unicode string + def ilove8bit(*args): + return "\x99\xaa\xbb\xcc" + + self.db.createscalarfunction("ilove8bit", ilove8bit) + + # coverage + def bad(*args): + 1 / 0 + + self.db.createscalarfunction("bad", bad) + 
self.assertRaises(ZeroDivisionError, c.execute, "select bad(3)+bad(4)") + # turn a blob into a string to fail python utf8 conversion + self.assertRaises(UnicodeDecodeError, c.execute, "select bad(cast (x'fffffcfb9208' as TEXT))") + + # register same named function taking different number of arguments + for i in range(-1, 4): + self.db.createscalarfunction("multi", lambda *args: len(args), i) + gc.collect() + for row in c.execute("select multi(), multi(1), multi(1,2), multi(1,2,3), multi(1,2,3,4), multi(1,2,3,4,5)"): + self.assertEqual(row, (0, 1, 2, 3, 4, 5)) + + # deterministic flag + + # check error handling + self.assertRaises(TypeError, self.db.createscalarfunction, "twelve", deterministic="324") + self.assertRaises(TypeError, self.db.createscalarfunction, "twelve", deterministic=324) + + # check it has an effect + class Counter: # on calling returns how many times this instance has been called + num_calls = 0 + + def __call__(self): + self.num_calls += 1 + return self.num_calls + + self.db.createscalarfunction("deterministic", Counter(), deterministic=True) + self.db.createscalarfunction("nondeterministic", Counter(), deterministic=False) + self.db.createscalarfunction("unspecdeterministic", Counter()) + + # only deterministic can be used for indices + c.execute("create table td(a,b); create index tda on td(a) where deterministic()") + self.assertEqual(c.execute("select nondeterministic()=nondeterministic()").fetchall()[0][0], 0) + self.assertEqual(c.execute("select unspecdeterministic()=unspecdeterministic()").fetchall()[0][0], 0) + self.assertRaises(apsw.SQLError, c.execute, "create index tdb on td(b) where nondeterministic()") + + def testAggregateFunctions(self): + "Verify aggregate functions" + c = self.db.cursor() + c.execute("create table foo(x,y,z)") + + # aggregate function + class longest: + + def __init__(self): + self.result = "" + + def step(self, context, *args): + for i in args: + if len(str(i)) > len(self.result): + self.result = str(i) + + def final(self, context): + return self.result + + def factory(): + v = longest() + return None, v.step, v.final + + factory = staticmethod(factory) + + self.assertRaises(TypeError, self.db.createaggregatefunction, True, True, True, + True) # wrong number/type of params + self.assertRaises(TypeError, self.db.createaggregatefunction, "twelve", 12) # must be callable + + if "DEBUG" not in apsw.compile_options: + # these cause assertion failures in sqlite + try: + self.db.createaggregatefunction("twelve", longest.factory, 923) # max args is 127 + except (apsw.SQLError, apsw.MisuseError): + # used to be SQLerror then changed https://sqlite.org/cvstrac/tktview?tn=3875 + pass + self.db.createaggregatefunction("twelve", None) + + self.assertRaises(TypeError, self.db.createaggregatefunction, u"twelve\N{BLACK STAR}", 12) # must be ascii + self.db.createaggregatefunction("longest", longest.factory) + + vals = ( + ("kjfhgk", "gkjlfdhgjkhsdfkjg", + "gklsdfjgkldfjhnbnvc,mnxb,mnxcv,mbncv,mnbm,ncvx,mbncv,mxnbcv,"), # last one is deliberately the longest + ("gdfklhj", ":gjkhgfdsgfd", "gjkfhgjkhdfkjh"), + ("gdfjkhg", "gkjlfd", ""), + (1, 2, 30), + ) + + for v in vals: + c.execute("insert into foo values(?,?,?)", v) + + v = next(c.execute("select longest(x,y,z) from foo"))[0] + self.assertEqual(v, vals[0][2]) + + # SQLite doesn't allow step functions to return an error, so we have to defer to the final + def badfactory(): + + def badfunc(*args): + 1 / 0 + + def final(*args): + self.fail("This should not be executed") + return 1 + + return None, 
badfunc, final
+
+        self.db.createaggregatefunction("badfunc", badfactory)
+        self.assertRaises(ZeroDivisionError, c.execute, "select badfunc(x) from foo")
+
+        # error in final
+        def badfactory():
+
+            def badfunc(*args):
+                pass
+
+            def final(*args):
+                1 / 0
+
+            return None, badfunc, final
+
+        self.db.createaggregatefunction("badfunc", badfactory)
+        self.assertRaises(ZeroDivisionError, c.execute, "select badfunc(x) from foo")
+
+        # error in step and final
+        def badfactory():
+
+            def badfunc(*args):
+                1 / 0
+
+            def final(*args):
+                raise ImportError()  # zero div from above is what should be returned
+
+            return None, badfunc, final
+
+        self.db.createaggregatefunction("badfunc", badfactory)
+        self.assertRaises(ZeroDivisionError, c.execute, "select badfunc(x) from foo")
+
+        # bad return from factory
+        def badfactory():
+
+            def badfunc(*args):
+                pass
+
+            def final(*args):
+                return 0
+
+            return {}
+
+        self.db.createaggregatefunction("badfunc", badfactory)
+        self.assertRaises(TypeError, c.execute, "select badfunc(x) from foo")
+
+        # incorrect number of items returned
+        def badfactory():
+
+            def badfunc(*args):
+                pass
+
+            def final(*args):
+                return 0
+
+            return (None, badfunc, final, badfactory)
+
+        self.db.createaggregatefunction("badfunc", badfactory)
+        self.assertRaises(TypeError, c.execute, "select badfunc(x) from foo")
+
+        # step not callable
+        def badfactory():
+
+            def badfunc(*args):
+                pass
+
+            def final(*args):
+                return 0
+
+            return (None, True, final)
+
+        self.db.createaggregatefunction("badfunc", badfactory)
+        self.assertRaises(TypeError, c.execute, "select badfunc(x) from foo")
+
+        # final not callable
+        def badfactory():
+
+            def badfunc(*args):
+                pass
+
+            def final(*args):
+                return 0
+
+            return (None, badfunc, True)
+
+        self.db.createaggregatefunction("badfunc", badfactory)
+        self.assertRaises(TypeError, c.execute, "select badfunc(x) from foo")
+
+        # error in factory method
+        def badfactory():
+            1 / 0
+
+        self.db.createaggregatefunction("badfunc", badfactory)
+        self.assertRaises(ZeroDivisionError, c.execute, "select badfunc(x) from foo")
+
+    def testCollation(self):
+        "Verify collations"
+        # create a whole bunch to check they are freed
+        for i in range(1024):
+            self.db.createcollation("x" * i, lambda x, y: i)
+        for ii in range(1024):
+            self.db.createcollation("x" * ii, lambda x, y: ii)
+
+        c = self.db.cursor()
+
+        def strnumcollate(s1, s2):
+            "return -1 if s1<s2, +1 if s1>s2 else 0. 
Items are string head and numeric tail" + # split values into two parts - the head and the numeric tail + values = [s1, s2] + for vn, v in enumerate(values): + for i in range(len(v), 0, -1): + if v[i - 1] not in "01234567890": + break + try: + v = v[:i], int(v[i:]) + except ValueError: + v = v[:i], None + values[vn] = v + # compare + if values[0] < values[1]: + return -1 # return an int + if values[0] > values[1]: + return 1 # and a long + return 0 + + self.assertRaises(TypeError, self.db.createcollation, "twelve", strnumcollate, 12) # wrong # params + self.assertRaises(TypeError, self.db.createcollation, "twelve", 12) # must be callable + self.db.createcollation("strnum", strnumcollate) + c.execute("create table foo(x)") + # adding this unicode in front improves coverage + uni = u"\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}" + vals = (uni + "file1", uni + "file7", uni + "file9", uni + "file17", uni + "file20") + valsrev = list(vals) + valsrev.reverse() # put them into table in reverse order + valsrev = valsrev[1:] + valsrev[:1] # except one out of order + c.executemany("insert into foo values(?)", [(x, ) for x in valsrev]) + for i, row in enumerate(c.execute("select x from foo order by x collate strnum")): + self.assertEqual(vals[i], row[0]) + + # collation function with an error + def collerror(*args): + return 1 / 0 + + self.db.createcollation("collerror", collerror) + self.assertRaises(ZeroDivisionError, c.execute, "select x from foo order by x collate collerror") + + # collation function that returns bad value + def collerror(*args): + return {} + + self.db.createcollation("collbadtype", collerror) + self.assertRaises(TypeError, c.execute, "select x from foo order by x collate collbadtype") + + # get error when registering + c.execute("select x from foo order by x collate strnum") # nb we don't read so cursor is still active + self.assertRaises(apsw.BusyError, self.db.createcollation, "strnum", strnumcollate) + + # unregister + for row in c: + pass + self.db.createcollation("strnum", None) + # check it really has gone + try: + c.execute("select x from foo order by x collate strnum") + except apsw.SQLError: + pass + # check statement still works + for _ in c.execute("select x from foo"): + pass + + # collation needed testing + self.assertRaises(TypeError, self.db.collationneeded, 12) + + def cn1(): + pass + + def cn2(x, y): + 1 / 0 + + def cn3(x, y): + self.assertTrue(x is self.db) + self.assertEqual(y, "strnum") + self.db.createcollation("strnum", strnumcollate) + + self.db.collationneeded(cn1) + try: + for _ in c.execute("select x from foo order by x collate strnum"): + pass + except TypeError: + pass + self.db.collationneeded(cn2) + try: + for _ in c.execute("select x from foo order by x collate strnum"): + pass + except ZeroDivisionError: + pass + self.db.collationneeded(cn3) + for _ in c.execute("select x from foo order by x collate strnum"): + pass + self.db.collationneeded(None) + self.db.createcollation("strnum", None) + + # check it really has gone + try: + c.execute("select x from foo order by x collate strnum") + except apsw.SQLError: + pass + + def testProgressHandler(self): + "Verify progress handler" + c = self.db.cursor() + phcalledcount = [0] + + def ph(): + phcalledcount[0] = phcalledcount[0] + 1 + return 0 + + # make 400 rows of random numbers + c.execute("begin ; create table foo(x)") + c.executemany("insert into foo values(?)", randomintegers(400)) + c.execute("commit") + + self.assertRaises(TypeError, self.db.setprogresshandler, 12) # must be callable + 
self.assertRaises(TypeError, self.db.setprogresshandler, ph, "foo") # second param is steps + self.db.setprogresshandler(ph, -17) # SQLite doesn't complain about negative numbers + self.db.setprogresshandler(ph, 20) + next(c.execute("select max(x) from foo")) + + self.assertNotEqual(phcalledcount[0], 0) + saved = phcalledcount[0] + + # put an error in the progress handler + def ph(): + return 1 / 0 + + self.db.setprogresshandler(ph, 1) + self.assertRaises(ZeroDivisionError, c.execute, "update foo set x=-10") + self.db.setprogresshandler(None) # clear ph so next line runs + # none should have taken + self.assertEqual(0, next(c.execute("select count(*) from foo where x=-10"))[0]) + # and previous ph should not have been called + self.assertEqual(saved, phcalledcount[0]) + + def ph(): + return BadIsTrue() + + self.db.setprogresshandler(ph, 1) + self.assertRaises(ZeroDivisionError, c.execute, "update foo set x=-10") + + def testChanges(self): + "Verify reporting of changes" + c = self.db.cursor() + c.execute("create table foo (x);begin") + for i in range(100): + c.execute("insert into foo values(?)", (i + 1000, )) + c.execute("commit") + c.execute("update foo set x=0 where x>=1000") + self.assertEqual(100, self.db.changes()) + c.execute("begin") + for i in range(100): + c.execute("insert into foo values(?)", (i + 1000, )) + c.execute("commit") + self.assertEqual(300, self.db.totalchanges()) + if hasattr(apsw, "faultdict"): + # check 64 bit conversion works + apsw.faultdict["ConnectionChanges64"] = True + self.assertEqual(1000000000 * 7 * 3, self.db.changes()) + + def testLastInsertRowId(self): + "Check last insert row id" + c = self.db.cursor() + c.execute("create table foo (x integer primary key)") + for i in range(10): + c.execute("insert into foo values(?)", (i, )) + self.assertEqual(i, self.db.last_insert_rowid()) + # get a 64 bit value + v = 2**40 + c.execute("insert into foo values(?)", (v, )) + self.assertEqual(v, self.db.last_insert_rowid()) + # try setting it + self.assertRaises( + TypeError, + self.db.set_last_insert_rowid, + ) + self.assertRaises(TypeError, self.db.set_last_insert_rowid, "3") + self.assertRaises(TypeError, self.db.set_last_insert_rowid, "3", 3) + self.assertRaises(OverflowError, self.db.set_last_insert_rowid, 2**40 * 2**40) + for v in -20, 0, 20, 2**32 - 1, -2**32 - 1, 2**60, -2**60: + c.execute("insert into foo values(?)", (v - 3, )) + self.assertNotEqual(v, self.db.last_insert_rowid()) + self.db.set_last_insert_rowid(v) + self.assertEqual(v, self.db.last_insert_rowid()) + + def testComplete(self): + "Completeness of SQL statement checking" + # the actual underlying routine just checks that there is a semi-colon + # at the end, not inside any quotes etc + self.assertEqual(False, apsw.complete("select * from")) + self.assertEqual(False, apsw.complete("select * from \";\"")) + self.assertEqual(False, apsw.complete("select * from \";")) + self.assertEqual(True, apsw.complete("select * from foo; select *;")) + self.assertEqual(False, apsw.complete("select * from foo where x=1")) + self.assertEqual(True, apsw.complete("select * from foo;")) + self.assertEqual(True, apsw.complete(u"select '\u9494\ua7a7';")) + self.assertRaises(TypeError, apsw.complete, 12) # wrong type + self.assertRaises(TypeError, apsw.complete) # not enough args + self.assertRaises(TypeError, apsw.complete, "foo", "bar") # too many args + + def testBusyHandling(self): + "Verify busy handling" + c = self.db.cursor() + c.execute("create table foo(x); begin") + c.executemany("insert into foo values(?)", 
randomintegers(400)) + c.execute("commit") + # verify it is blocked + db2 = apsw.Connection(TESTFILEPREFIX + "testdb") + c2 = db2.cursor() + c2.execute("begin exclusive") + try: + self.assertRaises(apsw.BusyError, c.execute, "begin immediate ; select * from foo") + finally: + del c2 + db2.close() + del db2 + + # close and reopen databases - sqlite will return Busy immediately to a connection + # it previously returned busy to + del c + self.db.close() + del self.db + self.db = apsw.Connection(TESTFILEPREFIX + "testdb") + db2 = apsw.Connection(TESTFILEPREFIX + "testdb") + c = self.db.cursor() + c2 = db2.cursor() + + # Put in busy handler + bhcalled = [0] + + def bh(*args): + bhcalled[0] = bhcalled[0] + 1 + if bhcalled[0] == 4: + return False + return True + + self.assertRaises(TypeError, db2.setbusyhandler, 12) # must be callable + self.assertRaises(TypeError, db2.setbusytimeout, "12") # must be int + db2.setbusytimeout( + -77) # SQLite doesn't complain about negative numbers, but if it ever does this will catch it + self.assertRaises(TypeError, db2.setbusytimeout, 77, 88) # too many args + self.db.setbusyhandler(bh) + + c2.execute("begin exclusive") + + try: + for row in c.execute("begin immediate ; select * from foo"): + self.fail("Transaction wasn't exclusive") + except apsw.BusyError: + pass + self.assertEqual(bhcalled[0], 4) + + # Close and reopen again + del c + del c2 + db2.close() + self.db.close() + del db2 + del self.db + self.db = apsw.Connection(TESTFILEPREFIX + "testdb") + db2 = apsw.Connection(TESTFILEPREFIX + "testdb") + c = self.db.cursor() + c2 = db2.cursor() + + # Put in busy timeout + TIMEOUT = 3 # seconds, must be integer as sqlite can round down to nearest second anyway + c2.execute("begin exclusive") + self.assertRaises(TypeError, self.db.setbusyhandler, "foo") + self.db.setbusytimeout(int(TIMEOUT * 1000)) + b4 = time.time() + try: + c.execute("begin immediate ; select * from foo") + except apsw.BusyError: + pass + after = time.time() + took = after - b4 + # this sometimes fails in virtualized environments due to time + # going backwards or not going forwards consistently. 
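+        # (note: setbusytimeout installs SQLite's built-in busy handler,
+        # replacing any callback registered with setbusyhandler, so no
+        # Python code runs during the retry loop timed above)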
+ if took + 1 < TIMEOUT: + print(f"Timeout was { TIMEOUT } seconds but only { took } seconds elapsed!") + self.assertTrue(took >= TIMEOUT) + + # check clearing of handler + c2.execute("rollback") + self.db.setbusyhandler(None) + b4 = time.time() + c2.execute("begin exclusive") + try: + c.execute("begin immediate ; select * from foo") + except apsw.BusyError: + pass + after = time.time() + self.assertTrue(after - b4 < TIMEOUT) + + # Close and reopen again + del c + del c2 + db2.close() + self.db.close() + del db2 + del self.db + self.db = apsw.Connection(TESTFILEPREFIX + "testdb") + db2 = apsw.Connection(TESTFILEPREFIX + "testdb") + c = self.db.cursor() + c2 = db2.cursor() + + # error in busyhandler + def bh(*args): + 1 / 0 + + c2.execute("begin exclusive") + self.db.setbusyhandler(bh) + self.assertRaises(ZeroDivisionError, c.execute, "begin immediate ; select * from foo") + del c + del c2 + db2.close() + + def bh(*args): + return BadIsTrue() + + db2 = apsw.Connection(TESTFILEPREFIX + "testdb") + c = self.db.cursor() + c2 = db2.cursor() + c2.execute("begin exclusive") + self.db.setbusyhandler(bh) + self.assertRaises(ZeroDivisionError, c.execute, "begin immediate ; select * from foo") + del c + del c2 + db2.close() + + def testBusyHandling2(self): + "Another busy handling test" + + # Based on an issue in 3.3.10 and before + con2 = apsw.Connection(TESTFILEPREFIX + "testdb") + cur = self.db.cursor() + cur2 = con2.cursor() + cur.execute("create table test(x,y)") + cur.execute("begin") + cur.execute("insert into test values(123,'abc')") + self.assertRaises(apsw.BusyError, cur2.execute, "insert into test values(456, 'def')") + cur.execute("commit") + self.assertEqual(1, next(cur2.execute("select count(*) from test where x=123"))[0]) + con2.close() + + def testInterruptHandling(self): + "Verify interrupt function" + # this is tested by having a user defined function make the interrupt + c = self.db.cursor() + c.execute("create table foo(x);begin") + c.executemany("insert into foo values(?)", randomintegers(400)) + c.execute("commit") + + def ih(*args): + self.db.interrupt() + return 7 + + self.db.createscalarfunction("seven", ih) + try: + for row in c.execute("select seven(x) from foo"): + pass + except apsw.InterruptError: + pass + # ::TODO:: raise the interrupt from another thread + + def testCommitHook(self): + "Verify commit hooks" + c = self.db.cursor() + c.execute("create table foo(x)") + c.executemany("insert into foo values(?)", randomintegers(10)) + chcalled = [0] + + def ch(): + chcalled[0] = chcalled[0] + 1 + if chcalled[0] == 4: + return 1 # abort + return 0 # continue + + self.assertRaises(TypeError, self.db.setcommithook, 12) # not callable + self.db.setcommithook(ch) + self.assertRaises(apsw.ConstraintError, c.executemany, "insert into foo values(?)", randomintegers(10)) + self.assertEqual(4, chcalled[0]) + self.db.setcommithook(None) + + def ch(): + chcalled[0] = 99 + return 1 + + self.db.setcommithook(ch) + self.assertRaises(apsw.ConstraintError, c.executemany, "insert into foo values(?)", randomintegers(10)) + # verify it was the second one that was called + self.assertEqual(99, chcalled[0]) + + # error in commit hook + def ch(): + return 1 / 0 + + self.db.setcommithook(ch) + self.assertRaises(ZeroDivisionError, c.execute, "insert into foo values(?)", (1, )) + + def ch(): + return BadIsTrue() + + self.db.setcommithook(ch) + self.assertRaises(ZeroDivisionError, c.execute, "insert into foo values(?)", (1, )) + + def testRollbackHook(self): + "Verify rollback hooks" + c = 
self.db.cursor() + c.execute("create table foo(x)") + rhcalled = [0] + + def rh(): + rhcalled[0] = rhcalled[0] + 1 + return 1 + + self.assertRaises(TypeError, self.db.setrollbackhook, 12) # must be callable + self.db.setrollbackhook(rh) + c.execute("begin ; insert into foo values(10); rollback") + self.assertEqual(1, rhcalled[0]) + self.db.setrollbackhook(None) + c.execute("begin ; insert into foo values(10); rollback") + self.assertEqual(1, rhcalled[0]) + + def rh(): + 1 / 0 + + self.db.setrollbackhook(rh) + # SQLite doesn't allow reporting an error from a rollback hook, so it will be seen + # in the next command (eg the select in this case) + self.assertRaises(ZeroDivisionError, c.execute, + "begin ; insert into foo values(10); rollback; select * from foo") + # check cursor still works + for row in c.execute("select * from foo"): + pass + + def testUpdateHook(self): + "Verify update hooks" + c = self.db.cursor() + c.execute("create table foo(x integer primary key, y)") + uhcalled = [] + + def uh(type, databasename, tablename, rowid): + uhcalled.append((type, databasename, tablename, rowid)) + + self.assertRaises(TypeError, self.db.setupdatehook, 12) # must be callable + self.db.setupdatehook(uh) + statements = ( + ("insert into foo values(3,4)", (apsw.SQLITE_INSERT, 3)), + ("insert into foo values(30,40)", (apsw.SQLITE_INSERT, 30)), + ( + "update foo set y=47 where x=3", + (apsw.SQLITE_UPDATE, 3), + ), + ( + "delete from foo where y=47", + (apsw.SQLITE_DELETE, 3), + ), + ) + for sql, res in statements: + c.execute(sql) + results = [(type, "main", "foo", rowid) for sql, (type, rowid) in statements] + self.assertEqual(uhcalled, results) + self.db.setupdatehook(None) + c.execute("insert into foo values(99,99)") + self.assertEqual(len(uhcalled), len(statements)) # length should have remained the same + + def uh(*args): + 1 / 0 + + self.db.setupdatehook(uh) + self.assertRaises(ZeroDivisionError, c.execute, "insert into foo values(100,100)") + self.db.setupdatehook(None) + # improve code coverage + c.execute("create table bar(x,y); insert into bar values(1,2); insert into bar values(3,4)") + + def uh(*args): + 1 / 0 + + self.db.setupdatehook(uh) + self.assertRaises(ZeroDivisionError, c.execute, "insert into foo select * from bar") + self.db.setupdatehook(None) + + # check cursor still works + c.execute("insert into foo values(1000,1000)") + self.assertEqual(1, next(c.execute("select count(*) from foo where x=1000"))[0]) + + def testProfile(self): + "Verify profiling" + # we do the test by looking for the maximum of PROFILESTEPS random + # numbers with an index present and without. The former + # should be way quicker. 
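+ # As a sketch, the profiler interface used below is just a callable
+ # taking the statement text and the elapsed time SQLite reports for it,
+ # installed with setprofile (illustrative only):
+ #
+ #   times = {}
+ #   def profile(statement, timing):
+ #       times[statement] = times.get(statement, 0) + timing
+ #   db = apsw.Connection(":memory:")
+ #   db.setprofile(profile)
+ #   db.cursor().execute("select 3").fetchall()  # fires once results are exhausted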
+ c = self.db.cursor() + c.execute("create table foo(x); begin") + c.executemany("insert into foo values(?)", randomintegers(PROFILESTEPS)) + profileinfo = [] + + def profile(statement, timing): + profileinfo.append((statement, timing)) + + c.execute("commit; create index foo_x on foo(x)") + self.assertRaises(TypeError, self.db.setprofile, 12) # must be callable + self.db.setprofile(profile) + for val1 in c.execute("select max(x) from foo"): + pass # profile is only run when results are exhausted + self.db.setprofile(None) + c.execute("drop index foo_x") + self.db.setprofile(profile) + for val2 in c.execute("select max(x) from foo"): + pass + self.assertEqual(val1, val2) + self.assertTrue(len(profileinfo) >= 2) # see SQLite ticket 2157 + self.assertEqual(profileinfo[0][0], profileinfo[-1][0]) + self.assertEqual("select max(x) from foo", profileinfo[0][0]) + self.assertEqual("select max(x) from foo", profileinfo[-1][0]) + # the query using the index should take way less time + self.assertTrue(profileinfo[0][1] <= profileinfo[-1][1]) + + def profile(*args): + 1 / 0 + + self.db.setprofile(profile) + self.assertRaises(ZeroDivisionError, c.execute, "create table bar(y)") + # coverage + wasrun = [False] + + def profile(*args): + wasrun[0] = True + + def uh(*args): + 1 / 0 + + self.db.setprofile(profile) + self.db.setupdatehook(uh) + self.assertRaises(ZeroDivisionError, c.execute, "insert into foo values(3)") + self.assertEqual(wasrun[0], False) + self.db.setprofile(None) + self.db.setupdatehook(None) + + def testThreading(self): + "Verify threading behaviour" + # We used to require all operations on a connection happen in + # the same thread. Now they can happen in any thread, so we + # ensure that inuse errors are detected by doing a long + # running operation in one thread. 
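+ # A miniature version of what is provoked here (illustrative sketch;
+ # a thread that finds the connection busy in another thread gets
+ # apsw.ThreadingViolationError rather than corruption):
+ #
+ #   import threading
+ #   db = apsw.Connection(":memory:")
+ #   def hammer():
+ #       try:
+ #           for _ in range(100000):
+ #               db.cursor().execute("select 1").fetchall()
+ #       except apsw.ThreadingViolationError:
+ #           pass  # connection was in use on the other thread
+ #   t = threading.Thread(target=hammer)
+ #   t.start()
+ #   hammer()  # run concurrently on the main thread too
+ #   t.join()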
+ c = self.db.cursor()
+ c.execute("create table foo(x);begin;")
+ c.executemany("insert into foo values(?)", randomintegers(10000))
+ c.execute("commit")
+
+ vals = {"stop": False, "raised": False}
+
+ def wt():
+ try:
+ while not vals["stop"]:
+ c.execute("select min(max(x-1+x),min(x-1+x)) from foo")
+ except apsw.ThreadingViolationError:
+ vals["raised"] = True
+ vals["stop"] = True
+
+ t = ThreadRunner(wt)
+ t.start()
+ # ensure thread t has started
+ time.sleep(0.1)
+ b4 = time.time()
+ # try to get a ThreadingViolationError for 30 seconds
+ try:
+ try:
+ while not vals["stop"] and time.time() - b4 < 30:
+ c.execute("select * from foo")
+ except apsw.ThreadingViolationError:
+ vals["stop"] = True
+ vals["raised"] = True
+ finally:
+ vals["stop"] = True
+ t.go()
+ self.assertEqual(vals["raised"], True)
+
+ def testStringsWithNulls(self):
+ "Verify that strings with nulls in them are handled correctly"
+
+ c = self.db.cursor()
+ c.execute("create table foo(row,str)")
+ vals = ("a simple string", "a simple string\0with a null", "a string\0with two\0nulls",
+ "or even a \0\0\0\0\0\0sequence\0\0\0\0of them", u"a \u1234 unicode \ufe54 string \u0089",
+ u"a \u1234 unicode \ufe54 string \u0089\0and some text",
+ u"\N{BLACK STAR} \N{WHITE STAR} \N{LIGHTNING} \N{COMET}\0more\0than you\0can handle",
+ u"\N{BLACK STAR} \N{WHITE STAR} \N{LIGHTNING} \N{COMET}\0\0\0\0\0sequences\0\0\0of them")
+
+ vals = vals + (
+ "a simple string\0",
+ u"a \u1234 unicode \ufe54 string \u0089\0",
+ )
+
+ for i, v in enumerate(vals):
+ c.execute("insert into foo values(?,?)", (i, v))
+
+ # add function to test conversion back as well
+ def snap(*args):
+ return args[0]
+
+ self.db.createscalarfunction("snap", snap)
+
+ # now see what we got out
+ count = 0
+ for row, v, fv in c.execute("select row,str,snap(str) from foo"):
+ count += 1
+ self.assertEqual(vals[row], v)
+ self.assertEqual(vals[row], fv)
+ self.assertEqual(count, len(vals))
+
+ # check execute
+ for v in vals:
+ self.assertEqual(v, next(c.execute("select ?", (v, )))[0])
+ # nulls not allowed in main query string, so let's check the other bits (unicode etc)
+ v2 = v.replace("\0", " zero ")
+ self.assertEqual(v2, next(c.execute("select '%s'" % (v2, )))[0])
+
+ # ::TODO:: check collations
+
+ def testSharedCache(self):
+ "Verify setting of shared cache"
+
+ # check parameters - wrong # or type of args
+ self.assertRaises(TypeError, apsw.enablesharedcache)
+ self.assertRaises(TypeError, apsw.enablesharedcache, "foo")
+ self.assertRaises(TypeError, apsw.enablesharedcache, True, None)
+
+ # the setting can be changed at almost any time
+ apsw.enablesharedcache(True)
+ apsw.enablesharedcache(False)
+
+ def testSerialize(self):
+ "Verify serialize/deserialize calls"
+ # check param types
+ self.assertRaises(TypeError, self.db.serialize)
+ self.assertRaises(TypeError, self.db.serialize, "a", "b")
+ self.assertRaises(TypeError, self.db.serialize, 3)
+ self.assertRaises(TypeError, self.db.deserialize, 3)
+ self.assertRaises(TypeError, self.db.deserialize, "main", "main")
+
+ # SQLite implementation detail: empty db gives back None
+ self.assertEqual(None, self.db.serialize("main"))
+ self.assertEqual(None, self.db.serialize("temp"))
+
+ # SQLite implementation detail: unknown name gives back None instead of error
+ self.assertEqual(None, self.db.serialize("nosuchdbname"))
+
+ # populate with real content
+ self.db.cursor().execute("create table temp.foo(x); insert into temp.foo values(3), (4), (5)")
+ # must have content now
+ self.assertNotEqual(None,
self.db.serialize("temp"))
+ self.assertTableNotExists("main.foo")
+ self.db.deserialize("main", self.db.serialize("temp"))
+ # without this renaming, things get confused between identical tables in main and temp
+ self.db.cursor().execute("alter table main.foo rename to bar")
+ self.assertTablesEqual(self.db, "bar", self.db, "foo")
+ # check we can modify deserialized
+ self.db.cursor().execute("insert into bar values(3)")
+ self.db.deserialize("main", self.db.serialize("temp"))
+ self.db.cursor().execute("alter table temp.foo rename to bar")
+ self.assertTablesEqual(self.db, "foo", self.db, "bar")
+ # add a megabyte to table
+ self.db.cursor().execute("insert into foo values(zeroblob(1024024))")
+
+ # A check that various extensions (such as fts3, rtree, icu)
+ # actually work. We don't know if they were supposed to be
+ # compiled in or not so the assumption is that they aren't.
+ # However if setup.py is being run then it sets environment variables
+ # saying the extensions *must* be present if they were enabled.
+ # See https://github.com/rogerbinns/apsw/issues/55 for what
+ # led to this.
+ def checkOptionalExtension(self, name, testquery):
+ try:
+ present = False
+ apsw.Connection(":memory:").cursor().execute(testquery)
+ present = True
+ except apsw.Error:
+ pass
+ if "APSW_TEST_" + name.upper() in os.environ:
+ self.assertEqual(present, True)
+ return present
+
+ def testFTSExtension(self):
+ "Check FTS extensions (if present)"
+ for v in 3, 4, 5:
+ self.checkFTSExtension(v)
+
+ def checkFTSExtension(self, v):
+ self.db.cursor().execute("drop table if exists foo; drop table if exists test")
+ if not self.checkOptionalExtension("fts" + str(v), "create virtual table foo using fts%d()" % v):
+ return
+ c = self.db.cursor()
+ data = {
+ 'cake': 'flour, eggs, milk',
+ 'bbq ribs': 'ribs, hot sauce',
+ 'mayo': 'oil, Eggs',
+ 'glue': 'Egg',
+ 'salmon': 'Fish',
+ 'burger': 'Mechanically recovered meat',
+ # From https://sqlite.org/cvstrac/wiki?p=FtsUsage
+ 'broccoli stew': 'broccoli peppers cheese tomatoes',
+ 'pumpkin stew': 'pumpkin onions garlic celery',
+ 'broccoli pie': 'broccoli cheese onions flour',
+ 'pumpkin pie': 'pumpkin sugar flour butter'
+ }
+
+ c.execute("create virtual table test using fts%d(name, ingredients)" % v)
+ c.executemany("insert into test values(?,?)", data.items())
+
+ def check(pattern, expectednames):
+ names = [n[0] for n in c.execute("select name from test where ingredients match ?", (pattern, ))]
+ names.sort()
+ expectednames = list(expectednames)
+ expectednames.sort()
+ self.assertEqual(names, expectednames)
+
+ check('onions cheese', ['broccoli pie'])
+ check('eggs OR oil', ['cake', 'mayo'])
+ check('"pumpkin onions"', ['pumpkin stew'])
+
+ def testRTreeExtension(self):
+ "Check RTree extension if present"
+ if not self.checkOptionalExtension("rtree",
+ "create virtual table foo using rtree(one, two, three, four, five)"):
+ return
+ c = self.db.cursor()
+ data = (
+ (1, 2, 3, 4),
+ (5.1, 6, 7.2, 8),
+ (1, 4, 9, 12),
+ (77, 77.1, 3, 9),
+ )
+ c.execute("create virtual table test using rtree(ii, x1, x2, y1, y2)")
+ for i, row in enumerate(data):
+ c.execute("insert into test values(?,?,?,?,?)", (i, row[0], row[1], row[2], row[3]))
+
+ def check(pattern, expectedrows):
+ rows = [n[0] for n in c.execute("select ii from test where " + pattern)]
+ rows.sort()
+ expectedrows = list(expectedrows)
+ expectedrows.sort()
+ self.assertEqual(rows, expectedrows)
+
+ check("x1>2 AND x2<7 AND y1>17.2 AND y2<=8", [])
+ check("x1>5 AND x2<=6 AND y1>-11 AND y2<=8", [1])
+
+ 
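+ # For reference, outside the test harness an R-Tree lookup is just the
+ # following (sketch; assumes SQLite was compiled with the rtree module):
+ #
+ #   con = apsw.Connection(":memory:")
+ #   con.cursor().execute("create virtual table demo using rtree(id, x1, x2, y1, y2)")
+ #   con.cursor().execute("insert into demo values(1, 0, 10, 0, 10)")
+ #   hits = [r[0] for r in con.cursor().execute("select id from demo where x1 > -1 and x2 < 11")]
+ #   assert hits == [1]
+
+ 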
def testGeopolyExtension(self):
+ "Check geopoly extension if present"
+ if not self.checkOptionalExtension("geopoly", "CREATE VIRTUAL TABLE newtab USING geopoly()"):
+ return
+ found = 0
+ for row in self.db.cursor().execute(
+ "CREATE VIRTUAL TABLE newtab USING geopoly();"
+ "INSERT INTO newtab(_shape) VALUES('[[0,0],[1,0],[0.5,1],[0,0]]');"
+ "SELECT * FROM newtab WHERE geopoly_overlap(_shape, $1);", ("[[0,0],[1,0],[0.5,1],[0,0]]", )):
+ found += 1
+ self.assertEqual(found, 1)
+
+ def testICUExtension(self):
+ "Check ICU extension if present"
+ if not self.checkOptionalExtension("icu", "select lower('I', 'tr_tr')"):
+ return
+
+ c = self.db.cursor()
+
+ # we compare SQLite standard vs icu
+ def check(text, locale, func="lower", equal=False):
+ q = "select " + func + "(?%s)"
+ sqlite = c.execute(q % ("", ), (text, )).fetchall()
+ icu = c.execute(q % (",'" + locale + "'", ), (text, )).fetchall()
+ if equal:
+ self.assertEqual(sqlite, icu)
+ else:
+ self.assertNotEqual(sqlite, icu)
+
+ check("I", "tr_tr")
+ check("I", "en_us", equal=True)
+
+ def testJSON1Extension(self):
+ "Check JSON1 extension if present"
+ if not self.checkOptionalExtension("json1", "select json('{}')"):
+ return
+ # some sanity checks that it is working
+ l = self.db.cursor().execute("select json_array_length('[1,2,3,4]')").fetchall()[0][0]
+ self.assertEqual(l, 4)
+ l = self.db.cursor().execute(
+ """select json_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.c[2].f')""").fetchall()[0][0]
+ self.assertEqual(l, 7)
+
+ def testTracebacks(self):
+ "Verify augmented tracebacks"
+
+ def badfunc(*args):
+ zebra = 3
+ 1 / 0
+
+ self.db.createscalarfunction("badfunc", badfunc)
+ try:
+ c = self.db.cursor()
+ c.execute("select badfunc(1,'two',3.14)")
+ self.fail("Exception should have occurred")
+ except ZeroDivisionError:
+ tb = sys.exc_info()[2]
+ frames = []
+ while tb:
+ frames.append(tb.tb_frame)
+ tb = tb.tb_next
+ except:
+ self.fail("Wrong exception type")
+
+ frames.reverse()
+ frame = frames[1] # frames[0] is badfunc above
+ self.assertTrue(frame.f_code.co_filename.endswith(".c"))
+ self.assertTrue(frame.f_lineno > 100)
+ self.assertTrue(frame.f_code.co_name.endswith("-badfunc"))
+ # check local variables
+ if platform.python_implementation() != "PyPy":
+ l = frame.f_locals
+ self.assertIn("NumberOfArguments", l)
+ self.assertEqual(l["NumberOfArguments"], 3)
+
+ def testLoadExtension(self):
+ "Check loading of extensions"
+ # unicode issues
+ # they need to be enabled first (off by default)
+ if self.db.config(apsw.SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, -1):
+ # someone wanted extension loading on by default!
Turn it back off + self.db.config(apsw.SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 0) + self.assertRaises(apsw.ExtensionLoadingError, self.db.loadextension, LOADEXTENSIONFILENAME) + self.assertEqual(self.db.config(apsw.SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, -1), 0) + self.db.enableloadextension(False) + self.assertRaises(ZeroDivisionError, self.db.enableloadextension, BadIsTrue()) + # should still be disabled + self.assertEqual(self.db.config(apsw.SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 0), 0) + self.assertRaises(apsw.ExtensionLoadingError, self.db.loadextension, LOADEXTENSIONFILENAME) + self.assertEqual(self.db.config(apsw.SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 1), 1) + self.db.loadextension(LOADEXTENSIONFILENAME) + self.assertEqual(self.db.config(apsw.SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 0), 0) + self.db.enableloadextension(True) + # make sure it checks args + self.assertRaises(TypeError, self.db.loadextension) + self.assertRaises(TypeError, self.db.loadextension, 12) + self.assertRaises(TypeError, self.db.loadextension, "foo", 12) + self.assertRaises(TypeError, self.db.loadextension, "foo", "bar", 12) + self.db.loadextension(LOADEXTENSIONFILENAME) + c = self.db.cursor() + self.assertEqual(1, next(c.execute("select half(2)"))[0]) + # second entry point hasn't been called yet + self.assertRaises(apsw.SQLError, c.execute, "select doubleup(2)") + # load using other entry point + self.assertRaises(apsw.ExtensionLoadingError, self.db.loadextension, LOADEXTENSIONFILENAME, "doesntexist") + self.db.loadextension(LOADEXTENSIONFILENAME, "alternate_sqlite3_extension_init") + self.assertEqual(4, next(c.execute("select doubleup(2)"))[0]) + + def testMakeSqliteMsgFromException(self): + "Test C function that converts exception into SQLite error code" + + class Source: + + def Create1(self, *args): + e = apsw.IOError() + e.extendedresult = apsw.SQLITE_IOERR_ACCESS + raise e + + def Create2(self, *args): + e = apsw.IOError() + e.extendedresult = (0x80 << 32) + apsw.SQLITE_IOERR_ACCESS # bigger than 32 bits + raise e + + self.db.createmodule("foo", Source()) + for i in "1", "2": + Source.Create = getattr(Source, "Create" + i) + try: + self.db.cursor().execute("create virtual table vt using foo()") + 1 / 0 + except: + klass, value, tb = sys.exc_info() + + self.assertEqual(klass, apsw.IOError) + self.assertTrue(isinstance(value, apsw.IOError)) + self.assertEqual(value.extendedresult & ((0xffff << 16) | 0xffff), apsw.SQLITE_IOERR_ACCESS) + + def testVtables(self): + "Test virtual table functionality" + + data = ( # row 0 is headers, column 0 is rowid + ("rowid", "name", "number", "item", "description"), + (1, "Joe Smith", 1.1, u"\u00f6\u1234", "foo"), + (6000000000, "Road Runner", -7.3, u"\u00f6\u1235", "foo"), + (77, "Fred", 0, u"\u00f6\u1236", "foo"), + ) + + dataschema = "create table this_should_be_ignored" + str(data[0][1:]) + # a query that will get constraints on every column + allconstraints = "select rowid,* from foo where rowid>-1000 and name>='A' and number<=12.4 and item>'A' and description=='foo' order by item" + allconstraintsl = [ + (-1, apsw.SQLITE_INDEX_CONSTRAINT_GT), # rowid > + (0, apsw.SQLITE_INDEX_CONSTRAINT_GE), # name >= + (1, apsw.SQLITE_INDEX_CONSTRAINT_LE), # number <= + (2, apsw.SQLITE_INDEX_CONSTRAINT_GT), # item > + (3, apsw.SQLITE_INDEX_CONSTRAINT_EQ), # description == + ] + + for i in range(20): + self.db.createmodule("x" * i, lambda x: i) + + # If shared cache is enabled then vtable creation is supposed to fail + # See https://sqlite.org/cvstrac/tktview?tn=3144 + try: + 
apsw.enablesharedcache(True) + db = apsw.Connection(TESTFILEPREFIX + "testdb2") + db.createmodule("y", lambda x: 2) + finally: + apsw.enablesharedcache(False) + + # The testing uses a different module name each time. SQLite + # doc doesn't define the semantics if a 2nd module is + # registered with the same name as an existing one and I was + # getting coredumps. It looks like issues inside SQLite. + + cur = self.db.cursor() + # should fail since module isn't registered + self.assertRaises(apsw.SQLError, cur.execute, "create virtual table vt using testmod(x,y,z)") + # wrong args + self.assertRaises(TypeError, self.db.createmodule, 1, 2, 3) + # give a bad object + self.db.createmodule("testmod", 12) # next line fails due to lack of Create method + self.assertRaises(AttributeError, cur.execute, "create virtual table xyzzy using testmod(x,y,z)") + + class Source: + + def __init__(self, *expectargs): + self.expectargs = expectargs + + def Create(self, *args): # db, modname, dbname, tablename, args + if self.expectargs != args[1:]: + raise ValueError("Create arguments are not correct. Expected " + str(self.expectargs) + + " but got " + str(args[1:])) + 1 / 0 + + def CreateErrorCode(self, *args): + # This makes sure that sqlite error codes happen. The coverage checker + # is what verifies the code actually works. + raise apsw.BusyError("foo") + + def CreateUnicodeException(self, *args): + raise Exception( + u"\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}\N{LATIN SMALL LETTER A WITH TILDE}\N{LATIN SMALL LETTER O WITH DIAERESIS}" + ) + + def CreateBadSchemaType(self, *args): + return 12, None + + def CreateBadSchema(self, *args): + return "this isn't remotely valid sql", None + + def CreateWrongNumReturns(self, *args): + return "way", "too", "many", "items", 3 + + def CreateBadSequence(self, *args): + + class badseq(object): + + def __getitem__(self, which): + if which != 0: + 1 / 0 + return 12 + + def __len__(self): + return 2 + + return badseq() + + # check Create does the right thing - we don't include db since it creates a circular reference + self.db.createmodule("testmod1", Source("testmod1", "main", "xyzzy", "1", '"one"')) + self.assertRaises(ZeroDivisionError, cur.execute, 'create virtual table xyzzy using testmod1(1,"one")') + # unicode + uni = u"\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}\N{LATIN SMALL LETTER A WITH TILDE}\N{LATIN SMALL LETTER O WITH DIAERESIS}" + + self.db.createmodule("testmod1dash1", Source("testmod1dash1", "main", uni, "1", '"' + uni + '"')) + self.assertRaises(ZeroDivisionError, cur.execute, + u'create virtual table %s using testmod1dash1(1,"%s")' % (uni, uni)) + Source.Create = Source.CreateErrorCode + self.assertRaises(apsw.BusyError, cur.execute, 'create virtual table xyzzz using testmod1(2, "two")') + Source.Create = Source.CreateUnicodeException + self.assertRaises(Exception, cur.execute, 'create virtual table xyzzz using testmod1(2, "two")') + Source.Create = Source.CreateBadSchemaType + self.assertRaises(TypeError, cur.execute, 'create virtual table xyzzz using testmod1(2, "two")') + Source.Create = Source.CreateBadSchema + self.assertRaises(apsw.SQLError, cur.execute, 'create virtual table xyzzz2 using testmod1(2, "two")') + Source.Create = Source.CreateWrongNumReturns + self.assertRaises(TypeError, cur.execute, 'create virtual table xyzzz2 using testmod1(2, "two")') + Source.Create = Source.CreateBadSequence + self.assertRaises(ZeroDivisionError, cur.execute, 'create virtual table xyzzz2 using testmod1(2, "two")') + + # a good version of Source + class Source: + + 
def Create(self, *args): + return dataschema, VTable(list(data)) + + Connect = Create + + class VTable: + + # A set of results from bestindex which should all generate TypeError. + # Coverage checking will ensure all the code is appropriately tickled + badbestindex = ( + 12, + (12, ), + ((), ), + (((), ), ), + ((((), ), ), ), + (((((), ), ), ), ), + ((None, None, None, None, "bad"), ), + ((0, None, (0, ), None, None), ), + ((("bad", True), None, None, None, None), ), + (((0, True), "bad", None, None, None), ), + (None, "bad"), + [4, (3, True), [2, False], 1, [0]], + ) + numbadbextindex = len(badbestindex) + + def __init__(self, data): + self.data = data + self.bestindex3val = 0 + + def BestIndex1(self, wrong, number, of, arguments): + 1 / 0 + + def BestIndex2(self, *args): + 1 / 0 + + def BestIndex3(self, constraints, orderbys): + retval = self.badbestindex[self.bestindex3val] + self.bestindex3val += 1 + if self.bestindex3val >= self.numbadbextindex: + self.bestindex3val = 0 + return retval + + def BestIndex4(self, constraints, orderbys): + # this gives ValueError ("bad" is not a float) + return (None, 12, u"\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}", "anything", "bad") + + def BestIndex5(self, constraints, orderbys): + # unicode error + return (None, None, "\xde\xad\xbe\xef") + + def BestIndex6(self, constraints, orderbys): + return ((0, 1, (2, BadIsTrue()), 3, 4), ) + + def BestIndex7(self, constraints, orderbys): + return (None, 77, "foo", BadIsTrue(), 99) + + _bestindexreturn = 99 + + def BestIndex99(self, constraints, orderbys): + cl = list(constraints) + cl.sort() + assert allconstraintsl == cl + assert orderbys == ((2, False), ) + retval = ([4, (3, True), [2, False], 1, (0, False)], 997, u"\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}", + False, 99)[:self._bestindexreturn] + return retval + + def BestIndexGood(self, constraints, orderbys): + return None + + def BestIndexGood2(self, constraints, orderbys): + return [] # empty list is same as None + + def Open(self): + return Cursor(self) + + def Open1(self, wrong, number, of, arguments): + 1 / 0 + + def Open2(self): + 1 / 0 + + def Open3(self): + return None + + def Open99(self): + return Cursor(self) + + UpdateInsertRow1 = None + + def UpdateInsertRow2(self, too, many, args): + 1 / 0 + + def UpdateInsertRow3(self, rowid, fields): + 1 / 0 + + def UpdateInsertRow4(self, rowid, fields): + assert rowid is None + return None + + def UpdateInsertRow5(self, rowid, fields): + assert rowid is None + return "this is not a number" + + def UpdateInsertRow6(self, rowid, fields): + assert rowid is None + return -922337203685477580799 # too big + + def UpdateInsertRow7(self, rowid, fields): + assert rowid is None + return 9223372036854775807 # ok + + def UpdateInsertRow8(self, rowid, fields): + assert rowid is not None + assert rowid == -12 + return "this should be ignored since rowid was supplied" + + def UpdateChangeRow1(self, too, many, args, methinks): + 1 / 0 + + def UpdateChangeRow2(self, rowid, newrowid, fields): + 1 / 0 + + def UpdateChangeRow3(self, rowid, newrowid, fields): + assert newrowid == rowid + + def UpdateChangeRow4(self, rowid, newrowid, fields): + assert newrowid == rowid + 20 + + def UpdateDeleteRow1(self, too, many, args): + 1 / 0 + + def UpdateDeleteRow2(self, rowid): + 1 / 0 + + def UpdateDeleteRow3(self, rowid): + assert rowid == 77 + + def Disconnect1(self, too, many, args): + 1 / 0 + + def Disconnect2(self): + 1 / 0 + + def Disconnect3(self): + pass + + def Destroy1(self, too, many, args): + 1 / 0 + + def Destroy2(self): + 1 
/ 0 + + def Destroy3(self): + pass + + def Begin1(self, too, many, args): + 1 / 0 + + def Begin2(self): + 1 / 0 + + def Begin3(self): + pass + + def Sync(self): + pass + + def Commit(self): + pass + + def Rollback(self): + pass + + def Rename1(self, too, many, args): + 1 / 0 + + def Rename2(self, x): + 1 / 0 + + def Rename3(self, x): + return ["thisshouldbeignored" * 25, [1]] + + def FindFunction1(self, too, many, args): + 1 / 0 + + def FindFunction2(self, name, nargs): + 1 / 0 + + def FindFunction3(self, name, nargs): + return "this isn't a function" + + def FindFunction4(self, name, nargs): + if nargs == 2: + return lambda x, y: x + y + return None + + class Cursor: + + _bestindexreturn = 99 + + def __init__(self, table): + self.table = table + + def Filter1(self, toofewargs): + 1 / 0 + + def Filter2(self, *args): + 1 / 0 + + def Filter99(self, idxnum, idxstr, constraintargs): + self.pos = 1 # row 0 is headers + if self._bestindexreturn == 0: + assert idxnum == 0 + assert idxstr == None + assert constraintargs == () + return + if self._bestindexreturn == 1: + assert idxnum == 0 + assert idxstr == None + assert constraintargs == ('foo', 'A', 12.4, 'A', -1000) + return + if self._bestindexreturn == 2: + assert idxnum == 997 + assert idxstr == None + assert constraintargs == ('foo', 'A', 12.4, 'A', -1000) + return + # 3 or more + assert idxnum == 997 + assert idxstr == u"\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}" + assert constraintargs == ('foo', 'A', 12.4, 'A', -1000) + + def Filter(self, *args): + self.Filter99(*args) + 1 / 0 + + def FilterGood(self, *args): + self.pos = 1 # row 0 is headers + + def Eof1(self, toomany, args): + 1 / 0 + + def Eof2(self): + 1 / 0 + + def Eof3(self): + return BadIsTrue() + + def Eof99(self): + return not (self.pos < len(self.table.data)) + + def Rowid1(self, too, many, args): + 1 / 0 + + def Rowid2(self): + 1 / 0 + + def Rowid3(self): + return "cdrom" + + def Rowid99(self): + return self.table.data[self.pos][0] + + def Column1(self): + 1 / 0 + + def Column2(self, too, many, args): + 1 / 0 + + def Column3(self, col): + 1 / 0 + + def Column4(self, col): + return self # bad type + + def Column99(self, col): + return self.table.data[self.pos][col + 1] # col 0 is row id + + def Close1(self, too, many, args): + 1 / 0 + + def Close2(self): + 1 / 0 + + def Close99(self): + del self.table # deliberately break ourselves + + def Next1(self, too, many, args): + 1 / 0 + + def Next2(self): + 1 / 0 + + def Next99(self): + self.pos += 1 + + # use our more complete version + self.db.createmodule("testmod2", Source()) + cur.execute("create virtual table foo using testmod2(2,two)") + # are missing/mangled methods detected correctly? 
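+ # (A sketch of the dispatch exercised below, using the classes above:
+ # SQLite drives roughly
+ #   schema, table = Source().Create(db, "testmod2", "main", "foo", "2", "two")
+ #   plan = table.BestIndex(constraints, orderbys)
+ #   cursor = table.Open()
+ #   cursor.Filter(idxnum, idxstr, constraintargs)
+ #   while not cursor.Eof():
+ #       cursor.Rowid(); cursor.Column(n); cursor.Next()
+ #   cursor.Close()
+ # so deleting or breaking any one method pinpoints the matching error.)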
+ self.assertRaises(AttributeError, cur.execute, "select rowid,* from foo order by number")
+ VTable.BestIndex = VTable.BestIndex1
+ self.assertRaises(TypeError, cur.execute, "select rowid,* from foo order by number")
+ VTable.BestIndex = VTable.BestIndex2
+ self.assertRaises(ZeroDivisionError, cur.execute, "select rowid,* from foo order by number")
+ # check bestindex results
+ VTable.BestIndex = VTable.BestIndex3
+ for i in range(VTable.numbadbextindex):
+ self.assertRaises(TypeError, cur.execute, allconstraints)
+ VTable.BestIndex = VTable.BestIndex4
+ self.assertRaises(ValueError, cur.execute, allconstraints)
+ VTable.BestIndex = VTable.BestIndex6
+ self.assertRaises(ZeroDivisionError, cur.execute, allconstraints)
+ VTable.BestIndex = VTable.BestIndex7
+ self.assertRaises(ZeroDivisionError, cur.execute, allconstraints)
+
+ # check varying number of return args from bestindex
+ VTable.BestIndex = VTable.BestIndex99
+ for i in range(6):
+ VTable._bestindexreturn = i
+ Cursor._bestindexreturn = i
+ try:
+ cur.execute(" " + allconstraints +
+ " " * i) # defeat statement cache - bestindex is called during prepare
+ except ZeroDivisionError:
+ pass
+
+ # error cases ok, return real values and move on to cursor methods
+ del VTable.Open
+ del Cursor.Filter
+ self.assertRaises(AttributeError, cur.execute, allconstraints) # missing open
+ VTable.Open = VTable.Open1
+ self.assertRaises(TypeError, cur.execute, allconstraints)
+ VTable.Open = VTable.Open2
+ self.assertRaises(ZeroDivisionError, cur.execute, allconstraints)
+ VTable.Open = VTable.Open3
+ self.assertRaises(AttributeError, cur.execute, allconstraints)
+ VTable.Open = VTable.Open99
+ self.assertRaises(AttributeError, cur.execute, allconstraints)
+ # put in filter
+ Cursor.Filter = Cursor.Filter1
+ self.assertRaises(TypeError, cur.execute, allconstraints)
+ Cursor.Filter = Cursor.Filter2
+ self.assertRaises(ZeroDivisionError, cur.execute, allconstraints)
+ Cursor.Filter = Cursor.Filter99
+ self.assertRaises(AttributeError, cur.execute, allconstraints)
+ Cursor.Eof = Cursor.Eof1
+ self.assertRaises(TypeError, cur.execute, allconstraints)
+ Cursor.Eof = Cursor.Eof2
+ self.assertRaises(ZeroDivisionError, cur.execute, allconstraints)
+ Cursor.Eof = Cursor.Eof3
+ self.assertRaises(ZeroDivisionError, cur.execute, allconstraints)
+ Cursor.Eof = Cursor.Eof99
+ self.assertRaises(AttributeError, cur.execute, allconstraints)
+ # now on to rowid
+ Cursor.Rowid = Cursor.Rowid1
+ self.assertRaises(TypeError, cur.execute, allconstraints)
+ Cursor.Rowid = Cursor.Rowid2
+ self.assertRaises(ZeroDivisionError, cur.execute, allconstraints)
+ Cursor.Rowid = Cursor.Rowid3
+ self.assertRaises(ValueError, cur.execute, allconstraints)
+ Cursor.Rowid = Cursor.Rowid99
+ self.assertRaises(AttributeError, cur.execute, allconstraints)
+ # column
+ Cursor.Column = Cursor.Column1
+ self.assertRaises(TypeError, cur.execute, allconstraints)
+ Cursor.Column = Cursor.Column2
+ self.assertRaises(TypeError, cur.execute, allconstraints)
+ Cursor.Column = Cursor.Column3
+ self.assertRaises(ZeroDivisionError, cur.execute, allconstraints)
+ Cursor.Column = Cursor.Column4
+ self.assertRaises(TypeError, cur.execute, allconstraints)
+ Cursor.Column = Cursor.Column99
+ try:
+ for row in cur.execute(allconstraints):
+ pass
+ except AttributeError:
+ pass
+ # next
+ Cursor.Next = Cursor.Next1
+ try:
+ for row in cur.execute(allconstraints):
+ pass
+ except TypeError:
+ pass
+ Cursor.Next = Cursor.Next2
+ try:
+ for row in cur.execute(allconstraints):
+ pass
+ except 
ZeroDivisionError: + pass + Cursor.Next = Cursor.Next99 + try: + for row in cur.execute(allconstraints): + pass + except AttributeError: + pass + # close + Cursor.Close = Cursor.Close1 + try: + for row in cur.execute(allconstraints): + pass + except TypeError: + pass + Cursor.Close = Cursor.Close2 + try: + for row in cur.execute(allconstraints): + pass + except ZeroDivisionError: + pass + Cursor.Close = Cursor.Close99 + + # update (insert) + sql = "insert into foo (name, description) values('gunk', 'foo')" + self.assertRaises(AttributeError, cur.execute, sql) + VTable.UpdateInsertRow = VTable.UpdateInsertRow1 + self.assertRaises(TypeError, cur.execute, sql) + VTable.UpdateInsertRow = VTable.UpdateInsertRow2 + self.assertRaises(TypeError, cur.execute, sql) + VTable.UpdateInsertRow = VTable.UpdateInsertRow3 + self.assertRaises(ZeroDivisionError, cur.execute, sql) + VTable.UpdateInsertRow = VTable.UpdateInsertRow4 + self.assertRaises(TypeError, cur.execute, sql) + VTable.UpdateInsertRow = VTable.UpdateInsertRow5 + self.assertRaises(ValueError, cur.execute, sql) + VTable.UpdateInsertRow = VTable.UpdateInsertRow6 + self.assertRaises(OverflowError, cur.execute, sql) + VTable.UpdateInsertRow = VTable.UpdateInsertRow7 + cur.execute(sql) + self.assertEqual(self.db.last_insert_rowid(), 9223372036854775807) + VTable.UpdateInsertRow = VTable.UpdateInsertRow8 + cur.execute("insert into foo (rowid,name, description) values(-12,'gunk', 'foo')") + + # update (change) + VTable.BestIndex = VTable.BestIndexGood + Cursor.Filter = Cursor.FilterGood + sql = "update foo set description=='bar' where description=='foo'" + self.assertRaises(AttributeError, cur.execute, sql) + VTable.UpdateChangeRow = VTable.UpdateChangeRow1 + self.assertRaises(TypeError, cur.execute, sql) + VTable.UpdateChangeRow = VTable.UpdateChangeRow2 + self.assertRaises(ZeroDivisionError, cur.execute, sql) + VTable.UpdateChangeRow = VTable.UpdateChangeRow3 + cur.execute(sql) + VTable.UpdateChangeRow = VTable.UpdateChangeRow4 + cur.execute("update foo set rowid=rowid+20 where 1") + + # update (delete) + VTable.BestIndex = VTable.BestIndexGood2 # improves code coverage + sql = "delete from foo where name=='Fred'" + self.assertRaises(AttributeError, cur.execute, sql) + VTable.UpdateDeleteRow = VTable.UpdateDeleteRow1 + self.assertRaises(TypeError, cur.execute, sql) + VTable.UpdateDeleteRow = VTable.UpdateDeleteRow2 + self.assertRaises(ZeroDivisionError, cur.execute, sql) + VTable.UpdateDeleteRow = VTable.UpdateDeleteRow3 + cur.execute(sql) + + # rename + sql = "alter table foo rename to bar" + VTable.Rename = VTable.Rename1 + self.assertRaises(TypeError, cur.execute, sql) + VTable.Rename = VTable.Rename2 + self.assertRaises(ZeroDivisionError, cur.execute, sql) + VTable.Rename = VTable.Rename3 + # this is to catch memory leaks + cur.execute(sql) + del VTable.Rename # method is optional + cur.execute("alter table bar rename to foo") # put things back + + # findfunction + # mess with overload function first + self.assertRaises(TypeError, self.db.overloadfunction, 1, 1) + # https://sqlite.org/cvstrac/tktview?tn=3507 + # self.db.overloadfunction("a"*1024, 1) + self.db.overloadfunction("xyz", 2) + self.assertRaises(apsw.SQLError, cur.execute, "select xyz(item,description) from foo") + VTable.FindFunction = VTable.FindFunction1 + self.assertRaises(TypeError, cur.execute, "select xyz(item,description) from foo ") + VTable.FindFunction = VTable.FindFunction2 + self.assertRaises(ZeroDivisionError, cur.execute, "select xyz(item,description) from foo ") + 
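+ # (The pairing being tested, as a sketch: overloadfunction reserves the
+ # SQL-visible name and the table's FindFunction supplies the callable:
+ #   db.overloadfunction("xyz", 2)
+ #   def FindFunction(self, name, nargs):
+ #       if name == "xyz" and nargs == 2:
+ #           return lambda x, y: x + y
+ #       return None
+ # Returning a non-callable, as the next variant does, is an error.)
+ 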
VTable.FindFunction = VTable.FindFunction3 + try: + for row in cur.execute("select xyz(item,description) from foo "): + pass + 1 / 0 + except TypeError: + pass + # this should work + VTable.FindFunction = VTable.FindFunction4 + for row in cur.execute("select xyz(item,description) from foo "): + pass + + # transaction control + # Begin, Sync, Commit and rollback all use the same underlying code + sql = "delete from foo where name=='Fred'" + VTable.Begin = VTable.Begin1 + self.assertRaises(TypeError, cur.execute, sql) + VTable.Begin = VTable.Begin2 + self.assertRaises(ZeroDivisionError, cur.execute, sql) + VTable.Begin = VTable.Begin3 + cur.execute(sql) + + # disconnect - sqlite ignores any errors + db = apsw.Connection(TESTFILEPREFIX + "testdb") + db.createmodule("testmod2", Source()) + cur2 = db.cursor() + for _ in cur2.execute("select * from foo"): + pass + VTable.Disconnect = VTable.Disconnect1 + self.assertRaises(TypeError, db.close) # nb close succeeds! + self.assertRaises(apsw.CursorClosedError, cur2.execute, "select * from foo") + del db + db = apsw.Connection(TESTFILEPREFIX + "testdb") + db.createmodule("testmod2", Source()) + cur2 = db.cursor() + for _ in cur2.execute("select * from foo"): + pass + VTable.Disconnect = VTable.Disconnect2 + self.assertRaises(ZeroDivisionError, db.close) # nb close succeeds! + self.assertRaises(apsw.CursorClosedError, cur2.execute, "select * from foo") + del db + db = apsw.Connection(TESTFILEPREFIX + "testdb") + db.createmodule("testmod2", Source()) + cur2 = db.cursor() + for _ in cur2.execute("select * from foo"): + pass + VTable.Disconnect = VTable.Disconnect3 + db.close() + del db + + # destroy + VTable.Destroy = VTable.Destroy1 + self.assertRaises(TypeError, cur.execute, "drop table foo") + VTable.Destroy = VTable.Destroy2 + self.assertRaises(ZeroDivisionError, cur.execute, "drop table foo") + VTable.Destroy = VTable.Destroy3 + cur.execute("drop table foo") + self.db.close() + + def testVTableExample(self): + "Tests vtable example code" + + # Make sure vtable code actually works by comparing SQLite + # results against manually computed results + + def getfiledata(directories): + columns = None + data = [] + counter = 1 + for directory in directories: + for f in os.listdir(directory): + if not os.path.isfile(os.path.join(directory, f)): + continue + counter += 1 + try: + st = os.stat(os.path.join(directory, f)) + if columns is None: + columns = ["rowid", "name", "directory"] + [x for x in dir(st) if x.startswith("st_")] + data.append([counter, f, directory] + [getattr(st, x) for x in columns[3:]]) + except OSError: + # we ignore file and permission errors in this example + pass + return columns, data + + class Source: + + def Create(self, db, modulename, dbname, tablename, *args): + columns, data = getfiledata([eval(a) for a in args]) # eval strips off layer of quotes + schema = "create table foo(" + ','.join(["'%s'" % (x, ) for x in columns[1:]]) + ")" + return schema, Table(columns, data) + + Connect = Create + + class Table: + + def __init__(self, columns, data): + self.columns = columns + self.data = data + + def BestIndex(self, *args): + return None + + def Open(self): + return Cursor(self) + + def Disconnect(self): + pass + + Destroy = Disconnect + + class Cursor: + + def __init__(self, table): + self.table = table + + def Filter(self, *args): + self.pos = 0 + + def Eof(self): + return self.pos >= len(self.table.data) + + def Rowid(self): + return self.table.data[self.pos][0] + + def Column(self, col): + return self.table.data[self.pos][1 + 
col] + + def Next(self): + self.pos += 1 + + def Close(self): + pass + + paths = [x.replace("\\", "/") for x in sys.path if len(x) and os.path.isdir(x)] + cols, data = getfiledata(paths) + self.db.createmodule("filesource", Source()) + cur = self.db.cursor() + args = ",".join(["'%s'" % (x, ) for x in paths]) + cur.execute("create virtual table files using filesource(" + args + ")") + + # Find the largest file (SQL) + for bigsql in cur.execute("select st_size,name,directory from files order by st_size desc limit 1"): + pass + # Find the largest (manually) + colnum = cols.index("st_size") + bigmanual = (0, "", "") + for file in data: + if file[colnum] > bigmanual[0]: + bigmanual = file[colnum], file[1], file[2] + + self.assertEqual(bigsql, bigmanual) + + # Find the oldest file (SQL) + for oldestsql in cur.execute("select st_ctime,name,directory from files order by st_ctime limit 1"): + pass + # Find the oldest (manually) + colnum = cols.index("st_ctime") + oldestmanual = (99999999999999999, "", "") + for file in data: + if file[colnum] < oldestmanual[0]: + oldestmanual = file[colnum], file[1], file[2] + + self.assertEqual(oldestmanual, oldestsql) + + def testClosingChecks(self): + "Check closed connection/blob/cursor is correctly detected" + cur = self.db.cursor() + rowid = next( + cur.execute("create table foo(x blob); insert into foo values(zeroblob(98765)); select rowid from foo"))[0] + blob = self.db.blobopen("main", "foo", "x", rowid, True) + blob.close() + nargs = self.blob_nargs + for func in [x for x in dir(blob) if not x.startswith("__") and not x in ("close", )]: + args = ("one", "two", "three")[:nargs.get(func, 0)] + try: + getattr(blob, func)(*args) + self.fail(f"blob method/attribute { func } didn't notice that the connection is closed") + except ValueError: # we issue ValueError to be consistent with file objects + pass + + self.db.close() + nargs = self.connection_nargs + tested = 0 + for func in [x for x in dir(self.db) if x in nargs or (not x.startswith("__") and not x in ("close", ))]: + tested += 1 + args = ("one", "two", "three")[:nargs.get(func, 0)] + + try: + # attributes come back as None after a close + func = getattr(self.db, func) + if func: + func(*args) + self.fail(f"connection method/attribute { func } didn't notice that the connection is closed") + except apsw.ConnectionClosedError: + pass + self.assertTrue(tested > len(nargs)) + + # do the same thing, but for cursor + nargs = self.cursor_nargs + tested = 0 + for func in [x for x in dir(cur) if not x.startswith("__") and not x in ("close", )]: + tested += 1 + args = ("one", "two", "three")[:nargs.get(func, 0)] + try: + getattr(cur, func)(*args) + self.fail(f"cursor method/attribute { func } didn't notice that the connection is closed") + except apsw.CursorClosedError: + pass + self.assertTrue(tested >= len(nargs)) + + def testClosing(self): + "Verify behaviour of close() functions" + cur = self.db.cursor() + cur.execute("select 3;select 4") + self.assertRaises(apsw.IncompleteExecutionError, cur.close) + # now force it + self.assertRaises(TypeError, cur.close, sys) + self.assertRaises(TypeError, cur.close, 1, 2, 3) + cur.close(True) + l = [self.db.cursor() for i in range(1234)] + cur = self.db.cursor() + cur.execute("select 3; select 4; select 5") + l2 = [self.db.cursor() for i in range(1234)] + self.assertRaises(apsw.IncompleteExecutionError, self.db.close) + self.assertRaises(TypeError, self.db.close, sys) + self.assertRaises(TypeError, self.db.close, 1, 2, 3) + self.db.close(True) # force it + self.db.close() 
# should be fine now + # coverage - close cursor after closing db + db = apsw.Connection(":memory:") + cur = db.cursor() + db.close() + cur.close() + + def testLargeObjects(self): + "Verify handling of large strings/blobs (>2GB) [requires 64 bit platform]" + assert is64bit + # For binary/blobs I use an anonymous area slightly larger than 2GB chunk of memory, but don't touch any of it + import mmap + f = mmap.mmap(-1, 2 * 1024 * 1024 * 1024 + 25000) + c = self.db.cursor() + c.execute("create table foo(theblob)") + self.assertRaises(apsw.TooBigError, c.execute, "insert into foo values(?)", (f, )) + c.execute("insert into foo values(?)", ("jkghjk" * 1024, )) + b = self.db.blobopen("main", "foo", "theblob", self.db.last_insert_rowid(), True) + b.read(1) + self.assertRaises(ValueError, b.write, f) + + def func(): + return f + + self.db.createscalarfunction("toobig", func) + self.assertRaises(apsw.TooBigError, c.execute, "select toobig()") + f.close() + # Other testing by fault injection + if not hasattr(apsw, "faultdict"): + return + + def testErrorCodes(self): + "Verify setting of result codes on error/exception" + fname = TESTFILEPREFIX + "gunk-errcode-test" + write_whole_file(fname, "wb", b"A" * 8192) + db = None + try: + # The exception could be thrown on either of these lines + # depending on several factors + db = apsw.Connection(fname) + db.cursor().execute("select * from sqlite_master") + 1 / 0 # should not be reachable + except: + klass, e, tb = sys.exc_info() + self.assertTrue(isinstance(e, apsw.NotADBError)) + self.assertEqual(e.result, apsw.SQLITE_NOTADB) + self.assertEqual(e.extendedresult & 0xff, apsw.SQLITE_NOTADB) + if db is not None: + db.close(True) + + try: + deletefile(fname) + except: + pass + + def testLimits(self): + "Verify setting and getting limits" + self.assertRaises(TypeError, self.db.limit, "apollo", 11) + c = self.db.cursor() + c.execute("create table foo(x)") + c.execute("insert into foo values(?)", ("x" * 1024, )) + old = self.db.limit(apsw.SQLITE_LIMIT_LENGTH) + self.db.limit(apsw.SQLITE_LIMIT_LENGTH, 1023) + self.assertRaises(apsw.TooBigError, c.execute, "insert into foo values(?)", ("y" * 1024, )) + self.assertEqual(1023, self.db.limit(apsw.SQLITE_LIMIT_LENGTH, 0)) + # bug in sqlite - see https://sqlite.org/cvstrac/tktview?tn=3085 + if False: + c.execute("insert into foo values(?)", ("x" * 1024, )) + self.assertEqual(apsw.SQLITE_MAX_LENGTH, self.db.limit(apsw.SQLITE_LIMIT_LENGTH)) + + def testConnectionHooks(self): + "Verify connection hooks" + del apsw.connection_hooks + try: + db = apsw.Connection(":memory:") + except AttributeError: + pass + apsw.connection_hooks = sys # bad type + try: + db = apsw.Connection(":memory:") + except TypeError: + pass + apsw.connection_hooks = ("a", "tuple", "of", "non-callables") + try: + db = apsw.Connection(":memory:") + except TypeError: + pass + apsw.connection_hooks = (dir, lambda x: 1 / 0) + try: + db = apsw.Connection(":memory:") + except ZeroDivisionError: + pass + + def delit(db): + del db + + apsw.connection_hooks = [delit for _ in range(9000)] + db = apsw.Connection(":memory:") + db.close() + apsw.connection_hooks = [lambda x: x] + db = apsw.Connection(":memory:") + db.close() + + def testCompileOptions(self): + "Verify getting compile options" + # We don't know what the right answers are, so just check + # there are more than zero entries. 
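+ # (Typical use is membership testing against the tuple of option
+ # strings; the exact entries depend on how this SQLite was compiled.
+ # A sketch:
+ #   if any(opt.startswith("ENABLE_FTS") for opt in apsw.compile_options):
+ #       pass  # full text search was compiled in
+ # )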
+ v = apsw.compile_options + self.assertEqual(type(v), tuple) + self.assertTrue(len(v) > 1) + + def testKeywords(self): + "Verify keywords" + k = apsw.keywords + self.assertTrue("INSERT" in k) + + def testIssue4(self): + "Issue 4: Error messages and SQLite ticket 3063" + connection = apsw.Connection(":memory:") + cursor = connection.cursor() + + cursor.execute("CREATE TABLE A_TABLE (ID ABC PRIMARY KEY NOT NULL)") + try: + cursor.execute("INSERT INTO A_TABLE VALUES (NULL)") + except: + klass, e, tb = sys.exc_info() + assert "A_TABLE.ID" in str(e) + + try: + cursor.execute("INSERT INTO A_TABLE VALUES (?)", (None, )) + except: + klass, e, tb = sys.exc_info() + assert "A_TABLE.ID" in str(e) + + def testIssue15(self): + "Issue 15: Release GIL during calls to prepare" + self.db.cursor().execute("create table foo(x)") + self.db.cursor().execute("begin exclusive") + db2 = apsw.Connection(TESTFILEPREFIX + "testdb") + db2.setbusytimeout(30000) + t = ThreadRunner(db2.cursor().execute, "select * from foo") + t.start() + time.sleep(1) + self.db.cursor().execute("commit") + t.go() + + def testIssue19(self): + "Issue 19: Incomplete cursor execution" + c = self.db.cursor() + c.execute("create table numbers(x)") + for i in range(10): + c.execute("insert into numbers values(?)", (i, )) + c.execute("select * from numbers") + next(c) + next(c) + next(c) + self.db.cursor().execute("delete from numbers where x=5") + next(c) + next(c) + + def testIssue24(self): + "Issue 24: Ints and Longs" + c = self.db.cursor() + for row in c.execute("select 3"): + pass + self.assertEqual(int, type(row[0])) + for row in c.execute("select -2147483647-1"): + pass + self.assertEqual(int, type(row[0])) + for row in c.execute("select 2147483647"): + pass + self.assertEqual(int, type(row[0])) + # Depending on the platform, sizeof(long), 64 bitness etc we + # may remain as python type int or type long. Check we are + # getting the right numbers no matter what. This duplicates + # testTypes but you can never be too careful. + for v in "2147483646", "2147483647", "2147483648", "2147483649", \ + "21474836460", "21474836470", "21474836480", "21474836490", \ + "147483646", "147483647", "147483648", "147483649": + for neg in ("-", ""): + val = c.execute("select " + neg + v).fetchall()[0][0] + val = repr(val) + if val.endswith("L"): + val = val[:-1] + self.assertEqual(val, neg + v) + + def testIssue31(self): + "Issue 31: GIL & SQLite mutexes with heavy threading, threadsafe errors from SQLite" + randomnumbers = [random.randint(0, 10000) for _ in range(10000)] + + cursor = self.db.cursor() + cursor.execute("create table foo(x)") + cursor.execute("begin") + for num in randomnumbers: + cursor.execute("insert into foo values(?)", (num, )) + cursor.execute("end") + + self.db.createscalarfunction("timesten", lambda x: x * 10) + + def dostuff(n): + # spend n seconds doing stuff to the database + c = self.db.cursor() + b4 = time.time() + while time.time() - b4 < n: + i = random.choice(randomnumbers) + if i % 5 == 0: + sql = "select timesten(x) from foo where x=%d order by x" % (i, ) + c.execute(sql) + elif i % 5 == 1: + sql = "select timesten(x) from foo where x=? 
order by x" + called = 0 + for row in self.db.cursor().execute(sql, (i, )): + called += 1 + self.assertEqual(row[0], 10 * i) + # same value could be present multiple times + self.assertTrue(called >= 1) + elif i % 5 == 2: + try: + self.db.cursor().execute("deliberate syntax error") + except apsw.SQLError: + assert ("deliberate" in str(sys.exc_info()[1])) + elif i % 5 == 3: + try: + self.db.cursor().execute("bogus syntax error") + except apsw.SQLError: + assert ("bogus" in str(sys.exc_info()[1])) + else: + sql = "select timesten(x) from foo where x=? order by x" + self.db.cursor().execute(sql, (i, )) + + runtime = int(os.getenv("APSW_HEAVY_DURATION")) if os.getenv("APSW_HEAVY_DURATION") else 15 + threads = [ThreadRunner(dostuff, runtime) for _ in range(20)] + for t in threads: + t.start() + + for t in threads: + # if there were any errors then exceptions would be raised here + t.go() + + def testIssue50(self): + "Issue 50: Check Blob.read return value on eof" + # first get what the system returns on eof + f = open(os.devnull, "rb") + try: + # deliberately hit eof + f.read() + # now try to read some more + feof = f.read(10) + finally: + f.close() + cur = self.db.cursor() + # make a blob to play with + rowid = next( + cur.execute("create table foo(x blob); insert into foo values(zeroblob(98765)); select rowid from foo"))[0] + blobro = self.db.blobopen("main", "foo", "x", rowid, False) + try: + blobro.read(98765) + beof = blobro.read(10) + self.assertEqual(type(beof), type(feof)) + self.assertEqual(beof, feof) + finally: + blobro.close() + + def testIssue98(self, runfrom106=None): + "Issue 98: An error in context manager commit should do a rollback" + self.db.cursor().execute("create table foo(x); insert into foo values(3); insert into foo values(4)") + # We need the reader to block a writer, which requires non-WAL mode + self.db.cursor().execute("pragma journal_mode=delete") + db2 = apsw.Connection(TESTFILEPREFIX + "testdb") + if runfrom106: + db2.setexectrace(runfrom106) + db2.cursor().execute("pragma journal_mode=delete") + # deliberately don't read from cursor on connection 1 which will prevent a commit + x = self.db.cursor().execute("select * from foo") + db2.__enter__() + db2.cursor().execute("insert into foo values(5)") # transaction is buffered in memory by SQLite + try: + db2.__exit__(None, None, None) + except apsw.BusyError: + pass + # Ensure transaction was rolled back + x.fetchall() + for row in db2.cursor().execute("select * from foo where x=5"): + self.fail("Transaction was not rolled back") + db2.close() + if runfrom106: return + # Verify that error in tracer results in rollback + self.db.__enter__() + + def h(*args): + 1 / 0 + + self.db.cursor().execute("insert into foo values(6)") + self.db.setexectrace(h) + try: + self.db.__exit__(None, None, None) + except ZeroDivisionError: + self.db.setexectrace(None) + pass + for row in self.db.cursor().execute("select * from foo where x=6"): + self.fail("Transaction was not rolled back") + + def testIssue103(self): + "Issue 103: Error handling when sqlite3_declare_vtab fails" + + class Source: + + def Create(self, *args): + return "create table x(delete)", None + + self.db.createmodule("issue103", Source()) + try: + self.db.cursor().execute("create virtual table foo using issue103()") + 1 / 0 # should not be reached + except apsw.SQLError: + assert "near \"delete\": syntax error" in str(sys.exc_info()[1]) + + def testIssue106(self): + "Issue 106: Profiling and tracing" + traces = [] + + def tracer(cur, sql, bindings): + sql = 
sql.lower().split()[0]
+ if sql in ("savepoint", "release", "rollback"):
+ traces.append(sql)
+ return True
+
+ self.testIssue98(tracer)
+ self.assertTrue(len(traces) >= 3)
+ self.assertTrue("savepoint" in traces)
+ self.assertTrue("release" in traces)
+ self.assertTrue("rollback" in traces)
+
+ def testIssue142(self):
+ "Issue 142: bytes from system during dump"
+ orig_strftime = time.strftime
+ orig_getuser = getpass.getuser
+ fh = []
+ try:
+ time.strftime = lambda arg: b"gjkTIMEJUNKhgjhg\xfe\xdf"
+ getpass.getuser = lambda: b"\x81\x82\x83gjkhgUSERJUNKjhg\xfe\xdf"
+ fh = [open(TESTFILEPREFIX + "test-shell-" + t, "w+", encoding="utf8") for t in ("in", "out", "err")]
+ kwargs = {"stdin": fh[0], "stdout": fh[1], "stderr": fh[2]}
+
+ rows = (["correct"], ["horse"], ["battery"], ["staple"])
+ self.db.cursor().execute("create table foo(x)")
+ self.db.cursor().executemany("insert into foo values(?)", rows)
+ shell = apsw.shell.Shell(db=self.db, **kwargs)
+ shell.command_dump([])
+
+ fh[1].seek(0)
+ out = fh[1].read()
+
+ for row in rows:
+ self.assertTrue(row[0] in out)
+
+ self.assertTrue("TIMEJUNK" in out)
+ self.assertTrue("USERJUNK" in out)
+
+ finally:
+ for f in fh:
+ f.close()
+ time.strftime = orig_strftime
+ getpass.getuser = orig_getuser
+
+ def testIssue186(self):
+ "Issue 186: description cache between statements"
+ cur = self.db.cursor()
+
+ for i, row in enumerate(cur.execute("select 1; select 1,2; select 1,2,3; select 1,2,3,4;")):
+ # this catches if the order of getting them makes a difference
+ if i % 2:
+ self.assertEqual(len(cur.description), len(cur.getdescription()))
+ else:
+ self.assertEqual(len(cur.getdescription()), len(cur.description))
+ self.assertEqual(len(cur.description), i + 1)
+
+ # check executemany too
+ for i, row in enumerate(
+ cur.executemany("select ?; select ?,?; select ?,?,?; select ?,?,?,?;", [
+ (1, 1, 2, 1, 2, 3, 1, 2, 3, 4),
+ (1, 1, 2, 1, 2, 3, 1, 2, 3, 4),
+ ])):
+ i %= 4
+ self.assertEqual(len(cur.getdescription()), i + 1)
+
+ # and the tracers
+ def tracer(cursor, *args):
+ self.assertEqual(len(cursor.getdescription()), expect)
+ return True
+
+ expect = 1
+ cur.setexectrace(tracer)
+ cur.setrowtrace(tracer)
+ for i, row in enumerate(cur.execute("select 1; select 1,2; select 1,2,3; select 1,2,3,4;")):
+ expect += 1
+ expect = 1
+ for i, row in enumerate(
+ cur.executemany("select ?; select ?,?; select ?,?,?; select ?,?,?,?;", [
+ (1, 1, 2, 1, 2, 3, 1, 2, 3, 4),
+ (1, 1, 2, 1, 2, 3, 1, 2, 3, 4),
+ ])):
+ expect += 1
+ if expect > 4: expect = 1
+
+ def testTicket2158(self):
+ "Check we are not affected by SQLite ticket #2158"
+
+ # https://sqlite.org/cvstrac/tktview?tn=2158
+ def dummy(x, y):
+ if x < y: return -1
+ if x > y: return 1
+ return 0
+
+ self.db.createcollation("dummy", dummy)
+ cur = self.db.cursor()
+ cur.execute("create table foo(x)")
+ cur.executemany("insert into foo values(?)", randomintegers(20))
+ for row in cur.execute("select * from foo order by x collate dummy"):
+ pass
+ self.db.createcollation("dummy", None)
+ self.assertRaises(apsw.SQLError, cur.execute, "select * from foo order by x collate dummy")
+
+ def testIssue199(self):
+ "Backup API should accept Connection subclasses"
+
+ # https://github.com/rogerbinns/apsw/issues/199
+ class subclass(apsw.Connection):
+ pass
+
+ dbsub = subclass("")
+ dbsub.cursor().execute("create table a(b);insert into a values(3);")
+
+ b = self.db.backup("main", dbsub, "main")
+ try:
+ while not b.done:
+ b.step(100)
+ finally:
+ b.finish()
+
+ def testIssue311(self):
+ "Indirect 
descendents of VFS should support WAL (in addition to direct subclasses)" + + class vfswrapped(apsw.VFS): + + def __init__(self): + self.myname = "testIssue311" + self.base = "" + apsw.VFS.__init__(self, self.myname, self.base) + + def xOpen(self, name, flags): + return vfsfilewrap(self.base, name, flags) + + class vfsfilewrap(apsw.VFSFile): + + def __init__(self, parent, name, flags): + apsw.VFSFile.__init__(self, parent, name, flags) + + # we make testdb be wal and then try to work with it + self.db.cursor().execute( + "pragma journal_mode=wal; create table test(x,y); insert into test values(3,4)").fetchall() + + wrap = vfswrapped() + + con = apsw.Connection(TESTFILEPREFIX + "testdb", vfs=wrap.myname) + + for row in con.cursor().execute("select x+y from test"): + self.assertEqual(row[0], 7) + break + else: + self.fail("No rows seen") + + def testIssue314(self): + "Reference cycles between instance, Connection and instance.method" + cleared = [] + + class SelfReferencer: + + def __del__(self): + cleared.append(id(self)) + + def __init__(self): + self.db = apsw.Connection("") + self.db.setbusyhandler(self.refme) + self.cur = self.db.cursor() + self.cur.setrowtrace(self.refme) + + def refme(self): + pass + + for i in range(1000): + SelfReferencer() + gc.collect() + self.assertEqual(1000, len(cleared)) + + def testPysqliteRecursiveIssue(self): + "Check an issue that affected pysqlite" + # https://code.google.com/p/pysqlite/source/detail?r=260ee266d6686e0f87b0547c36b68a911e6c6cdb + cur = self.db.cursor() + cur.execute("create table a(x); create table b(y);") + + def foo(): + yield (1, ) + cur.execute("insert into a values(?)", (1, )) + yield (2, ) + + self.assertRaises(apsw.ThreadingViolationError, cur.executemany, "insert into b values(?)", foo()) + + def testWriteUnraiseable(self): + "Verify writeunraiseable replacement function" + + def unraise(): + # We cause an unraiseable error to happen by writing to a + # blob open for reading. 
The close method called in the + # destructor will then also give the error + db = apsw.Connection(":memory:") + rowid = next(db.cursor().execute( + "create table foo(x); insert into foo values(x'aabbccdd'); select rowid from foo"))[0] + blob = db.blobopen("main", "foo", "x", rowid, False) + try: + blob.write(b"badd") + except apsw.ReadOnlyError: + pass + del db + del blob + gc.collect() + + # Normal excepthook + self.assertRaisesUnraisable(apsw.ReadOnlyError, unraise) + # excepthook with error to check PyErr_Display is called + xx = sys.excepthook + yy = sys.stderr + sys.stderr = open(TESTFILEPREFIX + "errout.txt", "wt", encoding="utf8") + + def ehook(blah): + 1 / 0 + + sys.excepthook = ehook + unraise() + sys.stderr.close() + v = open(TESTFILEPREFIX + "errout.txt", "rt", encoding="utf8").read() + deletefile(TESTFILEPREFIX + "errout.txt") + self.assertTrue(len(v)) + sys.excepthook = xx + sys.stderr = yy + + def testStatementCache(self, scsize=17): + "Verify statement cache integrity" + self.db = apsw.Connection(TESTFILEPREFIX + "testdb", statementcachesize=scsize) + cur = self.db.cursor() + cur.execute("create table foo(x,y)") + cur.execute("create index foo_x on foo(x)") + cur.execute("insert into foo values(1,2)") + cur.execute("drop index foo_x") + cur.execute("insert into foo values(1,2)") # cache hit, but needs reprepare + cur.execute("drop table foo; create table foo(x)") + try: + cur.execute("insert into foo values(1,2)") # cache hit, but invalid sql + except apsw.SQLError: + pass + cur.executemany("insert into foo values(?)", [[1], [2]]) + # overflow the statement cache + l = [self.db.cursor().execute("select x from foo" + " " * i) for i in range(scsize + 200)] + del l + gc.collect() + # coverage + l = [] + for i in range(scsize + 10): + l.append(self.db.cursor().execute("select x from foo" + " " * i)) + for row in self.db.cursor().execute("select * from foo"): + pass + # other wrangling + l = [self.db.cursor().execute("select x from foo") for i in range(scsize + 200)] + for i in range(scsize + 200): + for row in self.db.cursor().execute("select * from foo" + " " * i): + pass + del l + gc.collect() + db2 = apsw.Connection(TESTFILEPREFIX + "testdb", statementcachesize=scsize) + cur2 = db2.cursor() + cur2.execute("create table bar(x,y)") + for _ in cur.execute("select * from foo"): + pass + db2.close() + # Get some coverage - overflow cache and recycling + l = [self.db.cursor().execute(u"select 3" + " " * i) for i in range(100 + 256 + 17)] + while l: + l.pop().fetchall() + # embedded nulls + got = [] + try: + for row in cur.execute("select 3;select 4\0;select 5"): + got.append(row[0]) + except ValueError: + self.assertEqual(got, [3]) + # these compile to null vdbe + for _ in range(5): # ensure statement cache is used + for query in ( + "", + "-- foo", + ";", + "\n", + ): + for row in cur.execute(query): + self.fail("Query is empty") + # check with stats + s = self.db.cache_stats() + self.assertEqual(s["size"], scsize) + s2 = self.db.cache_stats(True) + s2.pop("entries") + self.assertEqual(s, s2) + self.assertEqual(self.db.execute("select 3" + " " * s["max_cacheable_bytes"] + "+1").fetchall(), [(4, )]) + self.assertEqual(s["too_big"] + 1, self.db.cache_stats().pop("too_big")) + + s = self.db.cache_stats() + self.db.execute("select 997", can_cache=False).fetchall() + self.assertEqual(s["no_cache"] + 1, self.db.cache_stats().pop("no_cache")) + self.db.execute("select 997", can_cache=True).fetchall() + self.assertEqual(s["misses"] + 2, self.db.cache_stats().pop("misses")) + 
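# a second identical can_cache=True execute should now hit the cache, leaving the miss count unchanged - except when the cache is disabled (scsize of zero), in which case every prepare is a miss, hence the extra "1 if not scsize else 0" in the next assertion + 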
self.db.execute("select 997", can_cache=True).fetchall() + self.assertEqual(s["misses"] + 2 + (1 if not scsize else 0), self.db.cache_stats().pop("misses")) + + # prepare_flags + class VTModule: + def Create(self, *args): + return ("create table dontcare(x int)", VTTable()) + Connect = Create + + class VTTable: + def Open(self): + return VTCursor() + def BestIndex(self, *args): + return None + + class VTCursor: + rows=[[99], [100]] + def __init__(self): + self.pos=0 + def Filter(self, *args): + self.pos=0 + def Eof(self): + return self.pos>=len(self.rows) + def Column(self, num): + if num<0: + return self.pos+1_000_000 + return self.rows[self.pos][num] + def Next(self): + self.pos+=1 + def Close(self): + pass + + + vt=VTModule() + self.db.createmodule("fortestingonly", vt) + # no_vtab doesn't block creating a vtab + self.db.execute("create VIRTUAL table fred USING fortestingonly()", prepare_flags=apsw.SQLITE_PREPARE_NO_VTAB) + # make sure query using vtab is identical so cache would be hit + query = "select * from fred" + self.assertEqual(self.db.execute(query).fetchall(), [(99,), (100,)]) + # this should fail (sqlite pretends the vtabs don't exist rather than giving specific error) + self.assertRaises(apsw.SQLError, self.db.execute, "select * from fred", prepare_flags=apsw.SQLITE_PREPARE_NO_VTAB) + + def testStatementCacheZeroSize(self): + "Rerun statement cache tests with a zero sized/disabled cache" + self.db = apsw.Connection(TESTFILEPREFIX + "testdb", statementcachesize=-1) + self.testStatementCache(0) + + # the text also includes characters that can't be represented in 16 bits (BMP) + wikipedia_text = u"""Wikipedia\nThe Free Encyclopedia\nEnglish\n6 383 000+ articles\n日本語\n1 292 000+ 記事\nРусский\n1 756 000+ статей\nDeutsch\n2 617 000+ Artikel\nEspañol\n1 717 000+ artículos\nFrançais\n2 362 000+ articles\nItaliano\n1 718 000+ voci\n中文\n1 231 000+ 條目\nPolski\n1 490 000+ haseł\nPortuguês\n1 074 000+ artigos\nSearch Wikipedia\nEN\nEnglish\n\n Read Wikipedia in your language\n1 000 000+ articles\nPolski\nالعربية\nDeutsch\nEnglish\nEspañol\nFrançais\nItaliano\nمصرى\nNederlands\n日本語\nPortuguês\nРусский\nSinugboanong Binisaya\nSvenska\nУкраїнська\nTiếng Việt\nWinaray\n中文\n100 000+ articles\nAfrikaans\nSlovenčina\nAsturianu\nAzərbaycanca\nБългарски\nBân-lâm-gú / Hō-ló-oē\nবাংলা\nБеларуская\nCatalà\nČeština\nCymraeg\nDansk\nEesti\nΕλληνικά\nEsperanto\nEuskara\nفارسی\nGalego\n한국어\nՀայերեն\nहिन्दी\nHrvatski\nBahasa Indonesia\nעברית\nქართული\nLatina\nLatviešu\nLietuvių\nMagyar\nМакедонски\nBahasa Melayu\nBahaso Minangkabau\nNorskbokmålnynorsk\nНохчийн\nOʻzbekcha / Ўзбекча\nҚазақша / Qazaqşa / قازاقشا\nRomână\nSimple English\nSlovenščina\nСрпски / Srpski\nSrpskohrvatski / Српскохрватски\nSuomi\nதமிழ்\nТатарча / Tatarça\nภาษาไทย\nТоҷикӣ\nتۆرکجه\nTürkçe\nاردو\nVolapük\n粵語\nမြန်မာဘာသာ\n10 000+ articles\nBahsa Acèh\nAlemannisch\nአማርኛ\nAragonés\nBasa Banyumasan\nБашҡортса\nБеларуская (Тарашкевіца)\nBikol Central\nবিষ্ণুপ্রিয়া মণিপুরী\nBoarisch\nBosanski\nBrezhoneg\nЧӑвашла\nDiné Bizaad\nEmigliàn–Rumagnòl\nFøroyskt\nFrysk\nGaeilge\nGàidhlig\nગુજરાતી\nHausa\nHornjoserbsce\nIdo\nIlokano\nInterlingua\nИрон æвзаг\nÍslenska\nJawa\nಕನ್ನಡ\nKreyòl Ayisyen\nKurdî / كوردی\nکوردیی ناوەندی\nКыргызча\nКырык Мары\nLëtzebuergesch\nLimburgs\nLombard\nLìgure\nमैथिली\nMalagasy\nമലയാളം\n文言\nमराठी\nმარგალური\nمازِرونی\nMìng-dĕ̤ng-ngṳ̄ / 閩東語\nМонгол\nनेपाल भाषा\nनेपाली\nNnapulitano\nNordfriisk\nOccitan\nМарий\nଓଡି଼ଆ\nਪੰਜਾਬੀ (ਗੁਰਮੁਖੀ)\nپنجابی (شاہ مکھی)\nپښتو\nPiemontèis\nPlattdüütsch\nQırımtatarca\nRuna 
Simi\nसंस्कृतम्\nСаха Тыла\nScots\nShqip\nSicilianu\nසිංහල\nسنڌي\nŚlůnski\nBasa Sunda\nKiswahili\nTagalog\nతెలుగు\nᨅᨔ ᨕᨙᨁᨗ / Basa Ugi\nVèneto\nWalon\n吳語\nייִדיש\nYorùbá\nZazaki\nŽemaitėška\nisiZulu\n1 000+ articles\nАдыгэбзэ\nÆnglisc\nAkan\nаԥсшәа\nԱրեւմտահայերէն\nArmãneashce\nArpitan\nܐܬܘܪܝܐ\nAvañe’ẽ\nАвар\nAymar\nBasa Bali\nBahasa Banjar\nभोजपुरी\nBislama\nབོད་ཡིག\nБуряад\nChavacano de Zamboanga\nCorsu\nVahcuengh / 話僮\nDavvisámegiella\nDeitsch\nދިވެހިބަސް\nDolnoserbski\nЭрзянь\nEstremeñu\nFiji Hindi\nFurlan\nGaelg\nGagauz\nGĩkũyũ\nگیلکی\n贛語\nHak-kâ-ngî / 客家語\nХальмг\nʻŌlelo Hawaiʻi\nIgbo\nInterlingue\nKabɩyɛ\nKapampangan\nKaszëbsczi\nKernewek\nភាសាខ្មែរ\nKinyarwanda\nКоми\nKongo\nकोंकणी / Konknni\nKriyòl Gwiyannen\nພາສາລາວ\nDzhudezmo / לאדינו\nЛакку\nLatgaļu\nЛезги\nLingála\nlojban\nLuganda\nMalti\nReo Mā’ohi\nMāori\nMirandés\nМокшень\nߒߞߏ\nNa Vosa Vaka-Viti\nNāhuatlahtōlli\nDorerin Naoero\nNedersaksisch\nNouormand / Normaund\nNovial\nAfaan Oromoo\nঅসমীযা়\nपालि\nPangasinán\nPapiamentu\nПерем Коми\nPfälzisch\nPicard\nКъарачай–Малкъар\nQaraqalpaqsha\nRipoarisch\nRumantsch\nРусиньскый Язык\nGagana Sāmoa\nSardu\nSeeltersk\nSesotho sa Leboa\nChiShona\nSoomaaliga\nSranantongo\nTaqbaylit\nTarandíne\nTetun\nTok Pisin\nfaka Tonga\nTürkmençe\nТыва дыл\nУдмурт\nئۇيغۇرچه\nVepsän\nVõro\nWest-Vlams\nWolof\nisiXhosa\nZeêuws\n100+ articles\nBamanankan\nChamoru\nChichewa\nEʋegbe\nFulfulde\n𐌲𐌿𐍄𐌹𐍃𐌺\nᐃᓄᒃᑎᑐᑦ / Inuktitut\nIñupiak\nKalaallisut\nكٲشُر\nLi Niha\nNēhiyawēwin / ᓀᐦᐃᔭᐍᐏᐣ\nNorfuk / Pitkern\nΠοντιακά\nརྫོང་ཁ\nRomani\nKirundi\nSängö\nSesotho\nSetswana\nСловѣ́ньскъ / ⰔⰎⰑⰂⰡⰐⰠⰔⰍⰟ\nSiSwati\nThuɔŋjäŋ\nᏣᎳᎩ\nTsėhesenėstsestotse\nTshivenḓa\nXitsonga\nchiTumbuka\nTwi\nትግርኛ\nဘာသာ မန်\n""" + assert (any(ord(c) > 65536 for c in wikipedia_text)) + + def testWikipedia(self): + "Use front page of wikipedia to check unicode handling" + self.db.close() + text = APSW.wikipedia_text + for encoding in "UTF-16", "UTF-16le", "UTF-16be", "UTF-8": + if os.path.exists(TESTFILEPREFIX + "testdb"): + deletefile(TESTFILEPREFIX + "testdb") + db = apsw.Connection(TESTFILEPREFIX + "testdb") + c = db.cursor() + c.execute("pragma encoding=\"%s\"" % (encoding, )) + for row in c.execute("pragma encoding"): + # we use startswith as UTF-16 will be returned with le/be suffix + self.assertTrue(row[0].startswith(encoding)) + c.execute("create table foo(x); insert into foo values(?)", (text, )) + for row in c.execute("select * from foo"): + self.assertEqual(row[0], text) + db.close() + + # calls that need protection + calls={ + 'sqlite3api': { # items of interest - sqlite3 calls + 'match': re.compile(r"(sqlite3_[A-Za-z0-9_]+)\s*\("), + # what must also be on same or preceding line + 'needs': re.compile("PYSQLITE(_|_BLOB_|_CON_|_CUR_|_SC_|_VOID_|_BACKUP_)CALL"), + + # except if match.group(1) matches this - these don't + # acquire db mutex so no need to wrap (determined by + # examining sqlite3.c). If they acquire non-database + # mutexes then that is ok. + + # In the case of sqlite3_result_*|declare_vtab, the mutex + # is already held by enclosing sqlite3_step and the + # methods will only be called from that same thread so it + # isn't a problem. + 'skipcalls': re.compile("^sqlite3_(blob_bytes|column_count|bind_parameter_count|data_count|vfs_.+|changes64|total_changes64" + "|get_autocommit|last_insert_rowid|complete|interrupt|limit|malloc64|free|threadsafe|value_.+" + "|libversion|enable_shared_cache|initialize|shutdown|config|memory_.+|soft_heap_limit(64)?" 
+ "|randomness|db_readonly|db_filename|release_memory|status64|result_.+|user_data|mprintf|aggregate_context" + "|declare_vtab|backup_remaining|backup_pagecount|mutex_enter|mutex_leave|sourceid|uri_.+" + "|column_name|column_decltype|column_database_name|column_table_name|column_origin_name" + "|stmt_isexplain|stmt_readonly)$"), + # error message + 'desc': "sqlite3_ calls must wrap with PYSQLITE_CALL", + }, + 'inuse': { + 'match': re.compile(r"(convert_column_to_pyobject|statementcache_prepare|statementcache_finalize|statementcache_next)\s*\("), + 'needs': re.compile("INUSE_CALL"), + 'desc': "call needs INUSE wrapper", + "skipfiles": re.compile(r".*[/\\]statementcache.c$"), + }, + } + + def sourceCheckMutexCall(self, filename, name, lines): + # we check that various calls are wrapped with various macros + for i, line in enumerate(lines): + if "PYSQLITE_CALL" in line and "Py" in line: + self.fail("%s: %s() line %d - Py call while GIL released - %s" % (filename, name, i, line.strip())) + for k, v in self.calls.items(): + if v.get('skipfiles', None) and v['skipfiles'].match(filename): + continue + mo = v['match'].search(line) + if mo: + func = mo.group(1) + if v.get('skipcalls', None) and v['skipcalls'].match(func): + continue + if not v["needs"].search(line) and not v["needs"].search(lines[i - 1]): + self.fail("%s: %s() line %d call to %s(): %s - %s\n" % + (filename, name, i, func, v['desc'], line.strip())) + + def sourceCheckFunction(self, filename, name, lines): + # not further checked + if name.split("_")[0] in ("ZeroBlobBind", "APSWVFS", "APSWVFSFile", "APSWBuffer", "FunctionCBInfo", + "apswurifilename"): + return + + checks = { + "APSWCursor": { + "skip": ("dealloc", "init", "dobinding", "dobindings", "doexectrace", "dorowtrace", "step", "close", + "close_internal", "tp_traverse"), + "req": { + "use": "CHECK_USE", + "closed": "CHECK_CURSOR_CLOSED", + }, + "order": ("use", "closed") + }, + "Connection": { + "skip": ("internal_cleanup", "dealloc", "init", "close", "interrupt", "close_internal", + "remove_dependent", "readonly", "getmainfilename", "db_filename", "traverse", "clear", + "tp_traverse", "get_cursor_factory", "set_cursor_factory"), + "req": { + "use": "CHECK_USE", + "closed": "CHECK_CLOSED", + }, + "order": ("use", "closed") + }, + "APSWBlob": { + "skip": ("dealloc", "init", "close", "close_internal"), + "req": { + "use": "CHECK_USE", + "closed": "CHECK_BLOB_CLOSED" + }, + "order": ("use", "closed") + }, + "APSWBackup": { + "skip": ("dealloc", "init", "close_internal", "get_remaining", "get_pagecount"), + "req": { + "use": "CHECK_USE", + "closed": "CHECK_BACKUP_CLOSED" + }, + "order": ("use", "closed") + }, + "apswvfs": { + "req": { + "preamble": "VFSPREAMBLE", + "tb": "AddTraceBackHere", + "postamble": "VFSPOSTAMBLE" + }, + "order": ("preamble", "tb", "postamble") + }, + "apswvfspy": { + "req": { + "check": "CHECKVFSPY", + "notimpl": "VFSNOTIMPLEMENTED(%(base)s," + }, + "order": ("check", "notimpl"), + }, + "apswvfspy_unregister": { + "req": { + "check": "CHECKVFSPY", + }, + }, + "apswvfsfile": { + "req": { + "preamble": "FILEPREAMBLE", + "postamble": "FILEPOSTAMBLE", + }, + "order": ("preamble", "postamble") + }, + "apswvfsfilepy": { + "skip": ("xClose", ), + "req": { + "check": "CHECKVFSFILEPY", + "notimpl": "VFSFILENOTIMPLEMENTED(%(base)s," + }, + "order": ("check", "notimpl"), + }, + } + + prefix, base = name.split("_", 1) + if name in checks: + checker = checks[name] + elif prefix in checks: + checker = checks[prefix] + else: + self.fail(filename + ": " + prefix + " not 
in checks (" + name + ")") + + if base in checker.get("skip", ()): + return + + format = {"base": base, "prefix": prefix} + + found = {} + for k in checker["req"]: + found[k] = None + + # check the lines + for i, line in enumerate(lines): + for k, v in checker["req"].items(): + v = v % format + if v in line and found[k] is None: + found[k] = i + + # check they are present + for k, v in checker["req"].items(): + if found[k] is None: + v = v % format + self.fail(filename + ": " + k + " " + v + " missing in " + name) + + # check order + order = checker.get("order", ()) + for i in range(len(order) - 2): + b4 = order[i] + after = order[i + 1] + if found[b4] > found[after]: + self.fail(filename + ": " + checker["req"][b4] % format + " should be before " + + checker["req"][after] % format + " in " + name) + + return + + should_use_compat = ("PyObject_CheckReadBuffer", "PyObject_AsReadBuffer") + + def testSourceChecks(self): + "Check various source code issues" + # We expect a coding style where the functions are named + # Object_method, are at the start of the line and have a first + # parameter named self. + for filename in glob.glob("src/*.c"): + if filename.endswith("testextension.c"): + continue + # check not using C++ style comments + code = read_whole_file(filename, "rt").replace("http://", "http:__").replace("https://", "https:__") + if "//" in code: + self.fail("// style comment in " + filename) + + if filename.replace("\\", "/") != "src/pyutil.c": + for n in self.should_use_compat: + if n in code: + self.fail("Should be using compat function for %s in file %s" % (n, filename)) + + # check check funcs + funcpat1 = re.compile(r"^(\w+_\w+)\s*\(\s*\w+\s*\*\s*self") + funcpat2 = re.compile(r"^(\w+)\s*\(") + name1 = None + name2 = None + lines = [] + infunc = 0 + for line in read_whole_file(filename, "rt").split("\n"): + if line.startswith("}") and infunc: + if infunc == 1: + self.sourceCheckMutexCall(filename, name1, lines) + self.sourceCheckFunction(filename, name1, lines) + elif infunc == 2: + self.sourceCheckMutexCall(filename, name2, lines) + else: + assert False + infunc = 0 + lines = [] + name1 = None + name2 = None + continue + if name1 and line.startswith("{"): + infunc = 1 + continue + if name2 and line.startswith("{"): + infunc = 2 + continue + if infunc: + lines.append(line) + continue + m = funcpat1.match(line) + if m: + name1 = m.group(1) + continue + m = funcpat2.match(line) + if m: + name2 = m.group(1) + continue + + def testConfig(self): + "Verify sqlite3_config wrapper" + # we need to ensure there are no outstanding sqlite objects + self.db = None + gc.collect() + self.assertRaises(apsw.MisuseError, apsw.config, apsw.SQLITE_CONFIG_MEMSTATUS, True) + apsw.shutdown() + try: + self.assertRaises(TypeError, apsw.config) + self.assertRaises(TypeError, apsw.config, "chicken") + apsw.config(apsw.SQLITE_CONFIG_SINGLETHREAD) + self.assertRaises(TypeError, apsw.config, apsw.SQLITE_CONFIG_SINGLETHREAD, 2) + self.assertRaises(TypeError, apsw.config, apsw.SQLITE_CONFIG_MEMSTATUS) + apsw.config(apsw.SQLITE_CONFIG_MEMSTATUS, True) + apsw.config(apsw.SQLITE_CONFIG_MEMSTATUS, False) + self.assertRaises(TypeError, apsw.config, 89748937) + x = 0x7fffffff + self.assertRaises(OverflowError, apsw.config, x * x * x * x) + self.assertTrue(apsw.config(apsw.SQLITE_CONFIG_PCACHE_HDRSZ) >= 0) + apsw.config(apsw.SQLITE_CONFIG_PMASZ, -1) + finally: + # put back to normal + apsw.config(apsw.SQLITE_CONFIG_SERIALIZED) + apsw.config(apsw.SQLITE_CONFIG_MEMSTATUS, True) + apsw.initialize() + + def 
testFaultInjectionTested(self): + "Make sure all fault injection is tested" + faults = set() + for filename in glob.glob("src/*.c"): + with open(filename, "rt", encoding="utf8") as f: + for line in f: + if "APSW_FAULT_INJECT" in line and "#define" not in line: + mo = re.match(r".*APSW_FAULT_INJECT\s*\(\s*(?P\w+)\s*,.*", line) + assert mo, f"Failed to match line { line }" + name = mo.group("name") + assert name not in faults, f"fault inject name { name } found multiple times" + faults.add(name) + + testcode = read_whole_file(__file__, "rt", "utf8") + + # special case + if re.search(r"\bBackupDependent\b", testcode): + for n in range(1, 6): + testcode += f"\nBackupDependent{ n }\n" + + for name in sorted(faults): + self.assertTrue(re.search(f"\\b{ name }\\b", testcode), f"Couldn't find test for fault '{ name }'") + + def testMemory(self): + "Verify memory tracking functions" + self.assertNotEqual(apsw.memoryused(), 0) + self.assertTrue(apsw.memoryhighwater() >= apsw.memoryused()) + self.assertRaises(TypeError, apsw.memoryhighwater, "eleven") + apsw.memoryhighwater(True) + self.assertEqual(apsw.memoryhighwater(), apsw.memoryused()) + self.assertRaises(TypeError, apsw.softheaplimit, 1, 2) + apsw.softheaplimit(0) + self.assertRaises(TypeError, apsw.releasememory, 1, 2) + res = apsw.releasememory(0x7fffffff) + self.assertTrue(type(res) in (int, )) + apsw.softheaplimit(0x1234567890abc) + self.assertEqual(0x1234567890abc, apsw.softheaplimit(0x1234567890abe)) + + def testRandomness(self): + "Verify randomness routine" + self.assertRaises(TypeError, apsw.randomness, "three") + self.assertRaises(OverflowError, apsw.randomness, 0xffffffffee) + self.assertRaises(ValueError, apsw.randomness, -2) + self.assertEqual(0, len(apsw.randomness(0))) + self.assertEqual(1, len(apsw.randomness(1))) + self.assertEqual(16383, len(apsw.randomness(16383))) + self.assertNotEqual(apsw.randomness(77), apsw.randomness(77)) + + def testSqlite3Pointer(self): + "Verify getting underlying sqlite3 pointer" + self.assertRaises(TypeError, self.db.sqlite3pointer, 7) + self.assertTrue(type(self.db.sqlite3pointer()) in (int, )) + self.assertEqual(self.db.sqlite3pointer(), self.db.sqlite3pointer()) + self.assertNotEqual(self.db.sqlite3pointer(), apsw.Connection(":memory:").sqlite3pointer()) + + def testPickle(self, module=None): + "Verify data etc can be pickled" + if module == None: + import pickle + self.testPickle(pickle) + try: + import cPickle + self.testPickle(cPickle) + except ImportError: + pass + return + + import pickle + PicklingError = pickle.PicklingError + try: + import cPickle + PicklingError = (PicklingError, cPickle.PicklingError) + except ImportError: + pass + + # work out what protocol versions we can use + versions = [] + for num in range(-1, 20): + try: + module.dumps(3, num) + versions.append(num) + except ValueError: + pass + + # some objects to try pickling + vals = test_types_vals + cursor = self.db.cursor() + cursor.execute("create table if not exists t(i,x)") + + def canpickle(val): + return True + + cursor.execute("BEGIN") + cursor.executemany("insert into t values(?,?)", [(i, v) for i, v in enumerate(vals) if canpickle(v)]) + cursor.execute("COMMIT") + + for ver in versions: + for row in cursor.execute("select * from t"): + self.assertEqual(row, module.loads(module.dumps(row, ver))) + rownum, val = row + if type(vals[rownum]) is float: + self.assertAlmostEqual(vals[rownum], val) + else: + self.assertEqual(vals[rownum], val) + # can't pickle cursors + try: + module.dumps(cursor, ver) + except TypeError: + 
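# depending on the pickle implementation an unpicklable object raises TypeError or PicklingError, so both are tolerated here + 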
pass + except PicklingError: + pass + # some versions can pickle the db, but give a zeroed db back + db = None + try: + db = module.loads(module.dumps(self.db, ver)) + except TypeError: + pass + if db is not None: + self.assertRaises(apsw.ConnectionClosedError, db.db_filename, "main") + self.assertRaises(apsw.ConnectionClosedError, db.cursor) + self.assertRaises(apsw.ConnectionClosedError, db.getautocommit) + self.assertRaises(apsw.ConnectionClosedError, db.in_transaction) + + def testStatus(self): + "Verify status function" + self.assertRaises(TypeError, apsw.status, "zebra") + self.assertRaises(apsw.MisuseError, apsw.status, 2323) + for i in apsw.mapping_status: + if type(i) != type(""): continue + res = apsw.status(getattr(apsw, i)) + self.assertEqual(len(res), 2) + self.assertEqual(type(res), tuple) + self.assertTrue(res[0] <= res[1]) + + def testDBStatus(self): + "Verify db status function" + self.assertRaises(TypeError, self.db.status, "zebra") + self.assertRaises(apsw.SQLError, self.db.status, 2323) + for i in apsw.mapping_db_status: + if type(i) != type(""): continue + res = self.db.status(getattr(apsw, i)) + self.assertEqual(len(res), 2) + self.assertEqual(type(res), tuple) + self.assertTrue(res[1] == 0 or res[0] <= res[1]) + + def testTxnState(self): + "Verify db.txn_state" + n = u"\u1234\u3454324" + self.assertRaises(TypeError, self.db.txn_state, 3) + self.assertEqual(apsw.mapping_txn_state["SQLITE_TXN_NONE"], self.db.txn_state()) + self.db.cursor().execute("BEGIN EXCLUSIVE") + self.assertEqual(apsw.mapping_txn_state["SQLITE_TXN_WRITE"], self.db.txn_state()) + self.db.cursor().execute("END") + self.assertEqual(apsw.mapping_txn_state["SQLITE_TXN_NONE"], self.db.txn_state()) + self.assertRaises(ValueError, self.db.txn_state, n) + self.assertEqual(apsw.mapping_txn_state["SQLITE_TXN_NONE"], self.db.txn_state("main")) + + def testZeroBlob(self): + "Verify handling of zero blobs" + self.assertRaises(TypeError, apsw.zeroblob) + self.assertRaises(TypeError, apsw.zeroblob, "foo") + self.assertRaises(TypeError, apsw.zeroblob, -7) + self.assertRaises(OverflowError, apsw.zeroblob, 4000000000) + cur = self.db.cursor() + cur.execute("create table foo(x)") + cur.execute("insert into foo values(?)", (apsw.zeroblob(27), )) + v = next(cur.execute("select * from foo"))[0] + self.assertEqual(v, b"\x00" * 27) + + # Make sure inheritance works + class multi: + + def __init__(self, *args): + self.foo = 3 + + class derived(apsw.zeroblob): + + def __init__(self, num): + #multi.__init__(self) + apsw.zeroblob.__init__(self, num) + + cur.execute("delete from foo; insert into foo values(?)", (derived(28), )) + v = next(cur.execute("select * from foo"))[0] + self.assertEqual(v, b"\x00" * 28) + self.assertEqual(apsw.zeroblob(91210).length(), 91210) + + def testBlobIO(self): + "Verify Blob input/output" + cur = self.db.cursor() + rowid = next( + cur.execute("create table foo(x blob); insert into foo values(zeroblob(98765)); select rowid from foo"))[0] + self.assertRaises(TypeError, self.db.blobopen, 1) + self.assertRaises(TypeError, self.db.blobopen, u"main", "foo\xf3") + self.assertRaises(TypeError, self.db.blobopen, u"main", "foo", "x", complex(-1, -1), True) + self.assertRaises(TypeError, self.db.blobopen, u"main", "foo", "x", rowid, True, False) + self.assertRaises(apsw.SQLError, self.db.blobopen, "main", "foo", "x", rowid + 27, False) + self.assertRaises(apsw.SQLError, self.db.blobopen, "foo", "foo", "x", rowid, False) + self.assertRaises(apsw.SQLError, self.db.blobopen, "main", "x", "x", rowid, False) + 
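# blobopen takes (database, table, column, rowid, writeable) - the calls above supplied a bad rowid, database and table, and the one below a bad column + 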
self.assertRaises(apsw.SQLError, self.db.blobopen, "main", "foo", "y", rowid, False) + blobro = self.db.blobopen("main", "foo", "x", rowid, False) + # sidebar: check they can't be manually created + self.assertRaises(TypeError, type(blobro)) + # check vals + self.assertEqual(blobro.length(), 98765) + self.assertEqual(blobro.length(), 98765) + self.assertEqual(blobro.read(0), b"") + zero = b"\x00" + step = 5 # must be exact multiple of size + assert (blobro.length() % step == 0) + for i in range(0, 98765, step): + x = blobro.read(step) + self.assertEqual(zero * step, x) + x = blobro.read(10) + self.assertEqual(x, b"") + blobro.seek(0, 1) + self.assertEqual(blobro.tell(), 98765) + blobro.seek(0) + self.assertEqual(blobro.tell(), 0) + self.assertEqual(len(blobro.read(11119999)), 98765) + blobro.seek(2222) + self.assertEqual(blobro.tell(), 2222) + blobro.seek(0, 0) + self.assertEqual(blobro.tell(), 0) + self.assertEqual(blobro.read(), b"\x00" * 98765) + blobro.seek(-3, 2) + self.assertEqual(blobro.read(), b"\x00" * 3) + # check types + self.assertRaises(TypeError, blobro.read, "foo") + self.assertRaises(TypeError, blobro.tell, "foo") + self.assertRaises(TypeError, blobro.seek) + self.assertRaises(TypeError, blobro.seek, "foo", 1) + self.assertRaises(TypeError, blobro.seek, 0, 1, 2) + self.assertRaises(ValueError, blobro.seek, 0, -3) + self.assertRaises(ValueError, blobro.seek, 0, 3) + # can't seek before beginning or after end of file + self.assertRaises(ValueError, blobro.seek, -1, 0) + self.assertRaises(ValueError, blobro.seek, 25, 1) + self.assertRaises(ValueError, blobro.seek, 25, 2) + self.assertRaises(ValueError, blobro.seek, 100000, 0) + self.assertRaises(ValueError, blobro.seek, -100000, 1) + self.assertRaises(ValueError, blobro.seek, -100000, 2) + # close testing + blobro.seek(0, 0) + self.assertRaises(apsw.ReadOnlyError, blobro.write, b"kermit was here") + # you get the error on the close too, and blob is always closed - sqlite ticket #2815 + self.assertRaises(apsw.ReadOnlyError, blobro.close) + # check can't work on closed blob + self.assertRaises(ValueError, blobro.read) + self.assertRaises(ValueError, blobro.readinto, b"ab") + self.assertRaises(ValueError, blobro.seek, 0, 0) + self.assertRaises(ValueError, blobro.tell) + self.assertRaises(ValueError, blobro.write, "abc") + # readinto tests + rowidri = self.db.cursor().execute( + "insert into foo values(x'112233445566778899aabbccddeeff'); select last_insert_rowid()").fetchall()[0][0] + blobro = self.db.blobopen("main", "foo", "x", rowidri, False) + self.assertRaises(TypeError, blobro.readinto) + self.assertRaises(TypeError, blobro.readinto, 3) + buffers = [] + import array + buffers.append(array.array("b", b"\0\0\0\0")) + buffers.append(bytearray(b"\0\0\0\0")) + + # bytearray returns ints rather than chars so a fixup + def _fixup(c): + if type(c) == int: + return bytes([c]) + return c + + for buf in buffers: + self.assertRaises(TypeError, blobro.readinto) + self.assertRaises(TypeError, blobro.readinto, buf, buf) + self.assertRaises(TypeError, blobro.readinto, buf, 1, buf) + self.assertRaises(TypeError, blobro.readinto, buf, 1, 1, buf) + blobro.seek(0) + blobro.readinto(buf, 1, 1) + self.assertEqual(_fixup(buf[0]), b"\x00") + self.assertEqual(_fixup(buf[1]), b"\x11") + self.assertEqual(_fixup(buf[2]), b"\x00") + self.assertEqual(_fixup(buf[3]), b"\x00") + self.assertEqual(len(buf), 4) + blobro.seek(3) + blobro.readinto(buf) + + def check_unchanged(): + self.assertEqual(_fixup(buf[0]), b"\x44") + self.assertEqual(_fixup(buf[1]), 
b"\x55") + self.assertEqual(_fixup(buf[2]), b"\x66") + self.assertEqual(_fixup(buf[3]), b"\x77") + self.assertEqual(len(buf), 4) + + check_unchanged() + blobro.seek(14) + # too much requested + self.assertRaises(ValueError, blobro.readinto, buf, 1) + check_unchanged() + # bounds errors + self.assertRaises(ValueError, blobro.readinto, buf, 1, -1) + self.assertRaises(ValueError, blobro.readinto, buf, 1, 7) + self.assertRaises(ValueError, blobro.readinto, buf, -1, 2) + self.assertRaises(ValueError, blobro.readinto, buf, 10000, 2) + self.assertRaises(OverflowError, blobro.readinto, buf, 1, 45236748972389749283) + check_unchanged() + # get a read error + blobro.seek(0) + self.db.cursor().execute("update foo set x=x'112233445566' where rowid=?", (rowidri, )) + self.assertRaises(apsw.AbortError, blobro.readinto, buf) + # should fail with buffer being a string + self.assertRaises(TypeError, blobro.readinto, "abcd", 1, 1) + self.assertRaises(TypeError, blobro.readinto, u"abcd", 1, 1) + # write tests + blobrw = self.db.blobopen("main", "foo", "x", rowid, True) + self.assertEqual(blobrw.length(), 98765) + blobrw.write(b"abcd") + blobrw.seek(0, 0) + self.assertEqual(blobrw.read(4), b"abcd") + blobrw.write(b"efg") + blobrw.seek(0, 0) + self.assertEqual(blobrw.read(7), b"abcdefg") + blobrw.seek(50, 0) + blobrw.write(b"hijkl") + blobrw.seek(-98765, 2) + self.assertEqual(blobrw.read(55), b"abcdefg" + b"\x00" * 43 + b"hijkl") + self.assertRaises(TypeError, blobrw.write, 12) + self.assertRaises(TypeError, blobrw.write) + self.assertRaises(TypeError, blobrw.write, u"foo") + # try to go beyond end + self.assertRaises(ValueError, blobrw.write, b" " * 100000) + self.assertRaises(TypeError, blobrw.close, "elephant") + # coverage + blobro = self.db.blobopen("main", "foo", "x", rowid, False) + self.assertRaises(apsw.ReadOnlyError, blobro.write, b"abcd") + blobro.close(True) + self.db.cursor().execute("insert into foo(_rowid_, x) values(99, 1)") + blobro = self.db.blobopen("main", "foo", "x", rowid, False) + self.assertRaises(TypeError, blobro.reopen) + self.assertRaises(TypeError, blobro.reopen, "banana") + self.assertRaises(OverflowError, blobro.reopen, 45236748972389749283) + first = blobro.read(2) + # check position is reset + blobro.reopen(rowid) + self.assertEqual(blobro.tell(), 0) + self.assertEqual(first, blobro.read(2)) + # invalid reopen + self.assertRaises(apsw.SQLError, blobro.reopen, 0x1ffffffff) + blobro.close() + + def testBlobReadError(self): + "Ensure blob read errors are handled well" + cur = self.db.cursor() + cur.execute("create table ioerror (x, blob)") + cur.execute("insert into ioerror (rowid,x,blob) values (2,3,x'deadbeef')") + blob = self.db.blobopen("main", "ioerror", "blob", 2, False) + blob.read(1) + # Do a write which cause blob to become invalid + cur.execute("update ioerror set blob='fsdfdsfasd' where x=3") + try: + blob.read(1) + 1 / 0 + except: + klass, value = sys.exc_info()[:2] + self.assertTrue(klass is apsw.AbortError) + + def testAutovacuumPages(self): + self.assertRaises(TypeError, self.db.autovacuum_pages) + self.assertRaises(TypeError, self.db.autovacuum_pages, 3) + for stmt in ("pragma page_size=512", "pragma auto_vacuum=FULL", "create table foo(x)", "begin"): + self.db.cursor().execute(stmt) + self.db.cursor().executemany("insert into foo values(zeroblob(1023))", [tuple() for _ in range(500)]) + + self.db.cursor().execute("commit") + + rowids = [row[0] for row in self.db.cursor().execute("select ROWID from foo")] + + last_free = [0] + + def avpcb(schema, nPages, 
nFreePages, nBytesPerPage): + self.assertEqual(schema, "main") + self.assertTrue(nFreePages < nPages) + self.assertTrue(nFreePages >= 2) + self.assertEqual(nBytesPerPage, 512) + # we always return 1, so second call must have more free pages than first + if last_free[0]: + self.assertTrue(nFreePages > last_free[0]) + else: + last_free[0] = nFreePages + return 1 + + def noparams(): + pass + + def badreturn(*args): + return "seven" + + self.db.cursor().execute("delete from foo where rowid=?", (rowids.pop(), )) + + self.db.autovacuum_pages(noparams) + self.assertRaises(TypeError, self.db.cursor().execute, "delete from foo where rowid=?", (rowids.pop(), )) + self.db.autovacuum_pages(None) + self.db.cursor().execute("delete from foo where rowid=?", (rowids.pop(), )) + self.db.autovacuum_pages(avpcb) + self.db.cursor().execute("delete from foo where rowid=?", (rowids.pop(), )) + self.db.autovacuum_pages(badreturn) + self.assertRaises(TypeError, self.db.cursor().execute, "delete from foo where rowid=?", (rowids.pop(), )) + self.db.autovacuum_pages(None) + self.db.cursor().execute("delete from foo where rowid=?", (rowids.pop(), )) + + def testURIFilenames(self): + assertRaises = self.assertRaises + assertEqual = self.assertEqual + + class TVFS(apsw.VFS): + + def __init__(self): + apsw.VFS.__init__(self, "uritest", "") + + def xOpen(self, name, flags): + assert isinstance(name, apsw.URIFilename) + # The various errors + assertRaises(TypeError, name.uri_parameter) + assertRaises(TypeError, name.uri_parameter, 2) + assertRaises(TypeError, name.uri_int) + assertRaises(TypeError, name.uri_int, 7) + assertRaises(TypeError, name.uri_int, 7, 7) + assertRaises(TypeError, name.uri_int, 7, 7, 7) + assertRaises(TypeError, name.uri_int, "seven", "seven") + assertRaises(TypeError, name.uri_boolean, "seven") + assertRaises(TypeError, name.uri_boolean, "seven", "seven") + assertRaises(TypeError, name.uri_boolean, "seven", None) + # Check values + assert name.filename().endswith("testdb2") + assertEqual(name.uri_parameter("notexist"), None) + assertEqual(name.uri_parameter("foo"), "1&2=3") + assertEqual(name.uri_int("foo", -7), -7) + assertEqual(name.uri_int("bar", -7), 43242342) + # https://sqlite.org/src/info/5f41597f7c + # assertEqual(name.uri_boolean("foo", False), False) + assertEqual(name.uri_boolean("bam", False), True) + assertEqual(name.uri_boolean("baz", True), False) + 1 / 0 + + testvfs = TVFS() + self.assertRaises(apsw.SQLError, + self.assertRaisesUnraisable, + ZeroDivisionError, + apsw.Connection, + "file:testdb2?foo=1%262%3D3&bar=43242342&bam=true&baz=fal%73%65", + flags=apsw.SQLITE_OPEN_READWRITE | apsw.SQLITE_OPEN_CREATE | apsw.SQLITE_OPEN_URI, + vfs="uritest") + + def testVFSWithWAL(self): + "Verify VFS using WAL" + apsw.connection_hooks.append( + lambda c: c.cursor().execute("pragma journal_mode=WAL; PRAGMA wal_autocheckpoint=1").fetchall()) + try: + self.testVFS() + finally: + apsw.connection_hooks.pop() + + def testVFS(self): + "Verify VFS functionality" + global testtimeout + + testdb = vfstestdb + + # Check basic functionality and inheritance - make an obfuscated provider + + # obfusvfs code + def encryptme(data): + # An "encryption" scheme in honour of MAPI and SQL server passwords + if not data: return data + return bytes([x ^ 0xa5 for x in data]) + + class ObfuscatedVFSFile(apsw.VFSFile): + + def __init__(self, inheritfromvfsname, filename, flags): + apsw.VFSFile.__init__(self, inheritfromvfsname, filename, flags) + + def xRead(self, amount, offset): + return 
encryptme(super(ObfuscatedVFSFile, self).xRead(amount, offset)) + + def xWrite(self, data, offset): + super(ObfuscatedVFSFile, self).xWrite(encryptme(data), offset) + + class ObfuscatedVFS(apsw.VFS): + + def __init__(self, vfsname="obfu", basevfs=""): + self.vfsname = vfsname + self.basevfs = basevfs + apsw.VFS.__init__(self, self.vfsname, self.basevfs) + + def xOpen(self, name, flags): + return ObfuscatedVFSFile(self.basevfs, name, flags) + + vfs = ObfuscatedVFS() + + query = "create table foo(x,y); insert into foo values(1,2); insert into foo values(3,4)" + self.db.cursor().execute(query) + + db2 = apsw.Connection(TESTFILEPREFIX + "testdb2", vfs=vfs.vfsname) + db2.cursor().execute(query) + db2.close() + self.db.cursor().execute("pragma journal_mode=delete").fetchall() + self.db.close() # flush + + # check the two databases are the same (modulo the XOR) + orig = read_whole_file(TESTFILEPREFIX + "testdb", "rb") + obfu = read_whole_file(TESTFILEPREFIX + "testdb2", "rb") + self.assertEqual(len(orig), len(obfu)) + self.assertNotEqual(orig, obfu) + + # we ignore wal/non-wal differences + def compare(one, two): + self.assertEqual(one[0:18], two[:18]) + self.assertEqual(one[96:], two[96:]) + + compare(orig, encryptme(obfu)) + + # helper routines + self.assertRaises(TypeError, apsw.exceptionfor, "three") + self.assertRaises(ValueError, apsw.exceptionfor, 8764324) + self.assertRaises(OverflowError, apsw.exceptionfor, 0xffffffffffffffff10) + + # test raw file object + f = ObfuscatedVFSFile("", os.path.abspath(TESTFILEPREFIX + "testdb"), + [apsw.SQLITE_OPEN_MAIN_DB | apsw.SQLITE_OPEN_READONLY, 0]) + del f # check closes + f = ObfuscatedVFSFile("", os.path.abspath(TESTFILEPREFIX + "testdb"), + [apsw.SQLITE_OPEN_MAIN_DB | apsw.SQLITE_OPEN_READONLY, 0]) + data = f.xRead(len(obfu), 0) # will encrypt it + compare(obfu, data) + f.xClose() + f.xClose() + f2 = apsw.VFSFile("", os.path.abspath(TESTFILEPREFIX + "testdb"), + [apsw.SQLITE_OPEN_MAIN_DB | apsw.SQLITE_OPEN_READONLY, 0]) + del f2 + f2 = apsw.VFSFile("", os.path.abspath(TESTFILEPREFIX + "testdb2"), + [apsw.SQLITE_OPEN_MAIN_DB | apsw.SQLITE_OPEN_READONLY, 0]) + data = f2.xRead(len(obfu), 0) + self.assertEqual(obfu, data) + f2.xClose() + f2.xClose() + + # cleanup so it doesn't interfere with following code using the same file + del f + del f2 + db2.close() + del db2 + vfs.unregister() + gc.collect() + + ### Detailed vfs testing + + # xRandomness is tested first. The method is called once after sqlite initializes + # and only the default vfs is called. 
Consequently we have a helper test method + # but it is only available when using testfixtures and the amalgamation + self.db = None + gc.collect() + + defvfs = apsw.vfsnames()[0] # we want to inherit from this one + + def testrand(): + gc.collect() + apsw.randomness(0) + vfs = RandomVFS() + db = apsw.Connection(TESTFILEPREFIX + "testdb") + next(db.cursor().execute("select randomblob(10)")) + + class RandomVFSUpper(apsw.VFS): + + def __init__(self): + apsw.VFS.__init__(self, "randomupper", defvfs) + + def xRandomness1(self, n): + return b"\xaa\xbb" + + class RandomVFS(apsw.VFS): + + def __init__(self): + apsw.VFS.__init__(self, "random", "randomupper", makedefault=True) + + def xRandomness1(self, bad, number, of, arguments): + 1 / 0 + + def xRandomness2(self, n): + 1 / 0 + + def xRandomness3(self, n): + return b"abcd" + + def xRandomness4(self, n): + return u"abcd" + + def xRandomness5(self, n): + return b"a" * (2 * n) + + def xRandomness6(self, n): + return None + + def xRandomness7(self, n): + return 3 + + def xRandomness99(self, n): + return super(RandomVFS, self).xRandomness(n + 2049) + + vfsupper = RandomVFSUpper() + vfs = RandomVFS() + self.assertRaises(TypeError, vfs.xRandomness, "jksdhfsd") + self.assertRaises(TypeError, vfs.xRandomness, 3, 3) + self.assertRaises(ValueError, vfs.xRandomness, -88) + + RandomVFS.xRandomness = RandomVFS.xRandomness1 + self.assertRaisesUnraisable(TypeError, testrand) + RandomVFS.xRandomness = RandomVFS.xRandomness2 + self.assertRaisesUnraisable(ZeroDivisionError, testrand) + RandomVFS.xRandomness = RandomVFS.xRandomness3 + testrand() # shouldn't have problems + RandomVFS.xRandomness = RandomVFS.xRandomness4 + self.assertRaisesUnraisable(TypeError, testrand) + RandomVFS.xRandomness = RandomVFS.xRandomness5 + testrand() # shouldn't have problems + RandomVFS.xRandomness = RandomVFS.xRandomness6 + testrand() # shouldn't have problems + RandomVFS.xRandomness = RandomVFS.xRandomness7 + self.assertRaisesUnraisable(TypeError, testrand) + RandomVFS.xRandomness = RandomVFS.xRandomness99 + testrand() # shouldn't have problems + vfsupper.xRandomness = vfsupper.xRandomness1 + testrand() # coverage + vfsupper.unregister() + vfs.unregister() + + class ErrorVFS(apsw.VFS): + # A vfs that returns errors for all methods + def __init__(self): + apsw.VFS.__init__(self, "errorvfs", "") + + def errorme(self, *args): + raise apsw.exceptionfor(apsw.SQLITE_IOERR) + + class TestVFS(apsw.VFS): + + def init1(self): + super(TestVFS, self).__init__("apswtest") + + def init99(self, name="apswtest", base=""): + super(TestVFS, self).__init__(name, base) + + def xDelete1(self, name, syncdir): + super(TestVFS, self).xDelete(".", False) + + def xDelete2(self, bad, number, of, args): + 1 / 0 + + def xDelete3(self, name, syncdir): + 1 / 0 + + def xDelete4(self, name, syncdir): + super(TestVFS, self).xDelete("bad", "arguments") + + def xDelete99(self, name, syncdir): + assert (type(name) == type("")) + assert (type(syncdir) == type(1)) + return super(TestVFS, self).xDelete(name, syncdir) + + def xAccess1(self, bad, number, of, args): + 1 / 0 + + def xAccess2(self, name, flags): + 1 / 0 + + def xAccess3(self, name, flags): + return super(TestVFS, self).xAccess("bad", "arguments") + + def xAccess4(self, name, flags): + return (3, ) + + def xAccess99(self, name, flags): + assert (type(name) == type("")) + assert (type(flags) == type(1)) + return super(TestVFS, self).xAccess(name, flags) + + def xFullPathname1(self, bad, number, of, args): + 1 / 0 + + def xFullPathname2(self, name): + 1 / 0 + + 
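# (as with the other numbered variants here, the test body later assigns each over the real method name - eg TestVFS.xDelete = TestVFS.xDelete1 - so every failure mode is exercised in turn; the 99 variants are the well behaved implementations) + 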
def xFullPathname3(self, name): + return super(TestVFS, self).xFullPathname("bad", "args") + + def xFullPathname4(self, name): + # parameter is larger than default buffer sizes used by sqlite + return super(TestVFS, self).xFullPathname(name * 10000) + + def xFullPathname5(self, name): + # result is larger than default buffer sizes used by sqlite + return "a" * 10000 + + def xFullPathname6(self, name): + return 12 # bad return type + + def xFullPathname99(self, name): + assert (type(name) == type(u"")) + return super(TestVFS, self).xFullPathname(name) + + def xOpen1(self, bad, number, of, arguments): + 1 / 0 + + def xOpen2(self, name, flags): + super(TestVFS, self).xOpen(name, 3) + 1 / 0 + + def xOpen3(self, name, flags): + v = super(TestVFS, self).xOpen(name, flags) + flags.append(v) + return v + + def xOpen4(self, name, flags): + return None + + def xOpen99(self, name, flags): + assert (isinstance(name, apsw.URIFilename) or name is None or type(name) == type(u"")) + assert (type(flags) == type([])) + assert (len(flags) == 2) + assert (type(flags[0]) in (int, )) + assert (type(flags[1]) in (int, )) + return super(TestVFS, self).xOpen(name, flags) + + def xOpen100(self, name, flags): + return TestFile(name, flags) + + def xDlOpen1(self, bad, number, of, arguments): + 1 / 0 + + def xDlOpen2(self, name): + 1 / 0 + + def xDlOpen3(self, name): + return -1 + + def xDlOpen4(self, name): + return "fred" + + def xDlOpen5(self, name): + return super(TestVFS, self).xDlOpen(3) + + # python 3 only test + def xDlOpen6(self, name): + return super(TestVFS, self).xDlOpen(b"abcd") # bad string type + + def xDlOpen7(self, name): + return 0xffffffffffffffff10 + + def xDlOpen99(self, name): + assert (type(name) == type(u"")) + res = super(TestVFS, self).xDlOpen(name) + if ctypes: + try: + cres = ctypes.cdll.LoadLibrary(name)._handle + except: + cres = 0 + assert (res == cres) + return res + + def xDlSym1(self, bad, number, of, arguments): + 1 / 0 + + def xDlSym2(self, handle, name): + 1 / 0 + + def xDlSym3(self, handle, name): + return "fred" + + def xDlSym4(self, handle, name): + super(TestVFS, self).xDlSym(3, 3) + + def xDlSym5(self, handle, name): + return super(TestVFS, self).xDlSym(handle, b"abcd") + + def xDlSym6(self, handle, name): + return 0xffffffffffffffff10 + + def xDlSym99(self, handle, name): + assert (type(handle) in (int, )) + assert (type(name) == type(u"")) + res = super(TestVFS, self).xDlSym(handle, name) + # pypy doesn't have dlsym + if not iswindows and hasattr(_ctypes, "dlsym"): + assert (_ctypes.dlsym(handle, name) == res) + # windows has funky issues I don't want to deal with here + return res + + def xDlClose1(self, bad, number, of, arguments): + 1 / 0 + + def xDlClose2(self, handle): + 1 / 0 + + def xDlClose3(self, handle): + return super(TestVFS, self).xDlClose("three") + + def xDlClose99(self, handle): + assert (type(handle) in (int, )) + super(TestVFS, self).xDlClose(handle) + + def xDlError1(self, bad, number, of, arguments): + 1 / 0 + + def xDlError2(self): + 1 / 0 + + def xDlError3(self): + return super(TestVFS, self).xDlError("three") + + def xDlError4(self): + return 3 + + def xDlError5(self): + return b"abcd" + + def xDlError6(self): + return None + + def xDlError99(self): + return super(TestVFS, self).xDlError() + + def xSleep1(self, bad, number, of, arguments): + 1 / 0 + + def xSleep2(self, microseconds): + 1 / 0 + + def xSleep3(self, microseconds): + return super(TestVFS, self).xSleep("three") + + def xSleep4(self, microseconds): + return "three" + + def xSleep5(self, 
microseconds): + return 0xffffffff0 + + def xSleep6(self, microseconds): + return 0xffffffffeeeeeeee0 + + def xSleep99(self, microseconds): + assert (type(microseconds) in (int, )) + return super(TestVFS, self).xSleep(microseconds) + + def xCurrentTime1(self, bad, args): + 1 / 0 + + def xCurrentTime2(self): + 1 / 0 + + def xCurrentTime3(self): + return super(TestVFS, self).xCurrentTime("three") + + def xCurrentTime4(self): + return "three" + + def xCurrentTime5(self): + return math.exp(math.pi) * 26000 + + def xCurrentTimeCorrect(self): + # actual correct implementation http://stackoverflow.com/questions/466321/convert-unix-timestamp-to-julian + return time.time() / 86400.0 + 2440587.5 + + def xCurrentTime99(self): + return super(TestVFS, self).xCurrentTime() + + def xGetLastError1(self, bad, args): + 1 / 0 + + def xGetLastError2(self): + 1 / 0 + + def xGetLastError3(self): + return super(TestVFS, self).xGetLastError("three") + + def xGetLastError4(self): + return 3 + + def xGetLastError5(self): + return -17, "a" * 1500 + + def xGetLastError6(self): + return -0x7fffffff - 200, None + + def xGetLastError7(self): + + class te(tuple): + + def __getitem__(self, n): + if n == 0: + return 23 + 1 / 0 + + return te((1, 2)) + + def xGetLastError8(self): + return 0, None + + def xGetLastError9(self): + return 0, "Some sort of message" + + def xGetLastError10(self): + return "banana", "Some sort of message" + + def xGetLastError99(self): + return super(TestVFS, self).xGetLastError() + + def xNextSystemCall1(self, bad, args): + 1 / 0 + + def xNextSystemCall2(self, name): + return 3 + + def xNextSystemCall3(self, name): + return "foo\xf3" + + def xNextSystemCall4(self, name): + 1 / 0 + + def xNextSystemCall99(self, name): + return super(TestVFS, self).xNextSystemCall(name) + + def xGetSystemCall1(self, bad, args): + 1 / 0 + + def xGetSystemCall2(self, name): + 1 / 0 + + def xGetSystemCall3(self, name): + return "fred" + + def xGetSystemCall4(self, name): + return 3.7 + + def xGetSystemCall99(self, name): + return super(TestVFS, self).xGetSystemCall(name) + + def xSetSystemCall1(self, bad, args, args3): + 1 / 0 + + def xSetSystemCall2(self, name, ptr): + 1 / 0 + + def xSetSystemCall3(self, name, ptr): + raise apsw.NotFoundError() + + def xSetSystemCall99(self, name, ptr): + return super(TestVFS, self).xSetSystemCall(name, ptr) + + class TestFile(apsw.VFSFile): + + def init1(self, name, flags): + super(TestFile, self).__init__("bogus", "arguments") + + def init2(self, name, flags): + super(TestFile, self).__init__("bogus", 3, 4) + + def init3(self, name, flags): + super(TestFile, self).__init__("bogus", "4", 4) + + def init4(self, name, flags): + super(TestFile, self).__init__("bogus", "4", [4, 4, 4, 4]) + + def init5(self, name, flags): + super(TestFile, self).__init__("", name, [0xffffffffeeeeeeee0, 0xffffffffeeeeeeee0]) + + def init6(self, name, flags): + super(TestFile, self).__init__("", name, [0xffffffffa, 0]) # 64 bit int vs long overflow + + def init7(self, name, flags): + super(TestFile, self).__init__("", name, (6, 7)) + + def init8(self, name, flags): + super(TestFile, self).__init__("bogus", name, flags) + + def init9(self, name, flags): + super(TestFile, self).__init__("", name, (6, "six")) + + def init10(self, name, flags): + + class badlist(list): # doesn't allows setting an element + + def __init__(self, *args): + super(badlist, self).__init__(args) + + def __setitem__(self, key, value): + raise ValueError("container is frozen") + + super(TestFile, self).__init__("", name, 
badlist(flags[0], flags[1])) + + def init99(self, name, flags): + super(TestFile, self).__init__("", name, flags) + + def xRead1(self, bad, number, of, arguments): + 1 / 0 + + def xRead2(self, amount, offset): + 1 / 0 + + def xRead3(self, amount, offset): + return 3 + + def xRead4(self, amount, offset): + return u"a" * amount + + def xRead5(self, amount, offset): + return super(TestFile, self).xRead(amount - 1, offset) + + def xRead99(self, amount, offset): + return super(TestFile, self).xRead(amount, offset) + + def xWrite1(self, bad, number, of, arguments): + 1 / 0 + + def xWrite2(self, buffy, offset): + 1 / 0 + + def xWrite99(self, buffy, offset): + return super(TestFile, self).xWrite(buffy, offset) + + def xUnlock1(self, bad, number, of, arguments): + 1 / 0 + + def xUnlock2(self, level): + 1 / 0 + + def xUnlock99(self, level): + return super(TestFile, self).xUnlock(level) + + def xLock1(self, bad, number, of, arguments): + 1 / 0 + + def xLock2(self, level): + 1 / 0 + + def xLock99(self, level): + return super(TestFile, self).xLock(level) + + def xTruncate1(self, bad, number, of, arguments): + 1 / 0 + + def xTruncate2(self, size): + 1 / 0 + + def xTruncate99(self, size): + return super(TestFile, self).xTruncate(size) + + def xSync1(self, bad, number, of, arguments): + 1 / 0 + + def xSync2(self, flags): + 1 / 0 + + def xSync99(self, flags): + return super(TestFile, self).xSync(flags) + + def xSectorSize1(self, bad, number, of, args): + 1 / 0 + + def xSectorSize2(self): + 1 / 0 + + def xSectorSize3(self): + return "three" + + def xSectorSize4(self): + return 0xffffffffeeeeeeee0 + + def xSectorSize99(self): + return super(TestFile, self).xSectorSize() + + def xDeviceCharacteristics1(self, bad, number, of, args): + 1 / 0 + + def xDeviceCharacteristics2(self): + 1 / 0 + + def xDeviceCharacteristics3(self): + return "three" + + def xDeviceCharacteristics4(self): + return 0xffffffffeeeeeeee0 + + def xDeviceCharacteristics99(self): + return super(TestFile, self).xDeviceCharacteristics() + + def xFileSize1(self, bad, number, of, args): + 1 / 0 + + def xFileSize2(self): + 1 / 0 + + def xFileSize3(self): + return "three" + + def xFileSize4(self): + return 0xffffffffeeeeeeee0 + + def xFileSize99(self): + res = super(TestFile, self).xFileSize() + if res < 100000: + return int(res) + return res + + def xCheckReservedLock1(self, bad, number, of, args): + 1 / 0 + + def xCheckReservedLock2(self): + 1 / 0 + + def xCheckReservedLock3(self): + return "three" + + def xCheckReservedLock4(self): + return 0xffffffffeeeeeeee0 + + def xCheckReservedLock99(self): + return super(TestFile, self).xCheckReservedLock() + + def xFileControl1(self, bad, number, of, args): + 1 / 0 + + def xFileControl2(self, op, ptr): + 1 / 0 + + def xFileControl3(self, op, ptr): + return "banana" + + def xFileControl99(self, op, ptr): + if op == 1027: + assert (ptr == 1027) + elif op == 1028: + if ctypes: + assert (True is ctypes.py_object.from_address(ptr).value) + else: + return super(TestFile, self).xFileControl(op, ptr) + return True + + TestVFS.xCurrentTime = TestVFS.xCurrentTimeCorrect + + # check initialization + self.assertRaises(TypeError, apsw.VFS, "3", 3) + self.assertRaises(ValueError, apsw.VFS, "never", "klgfkljdfsljgklfjdsglkdfs") + self.assertTrue("never" not in apsw.vfsnames()) + TestVFS.__init__ = TestVFS.init1 + vfs = TestVFS() + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, apsw.VFSNotImplementedError, testdb) + del vfs + gc.collect() + TestVFS.__init__ = TestVFS.init99 + vfs = TestVFS() + + # Should 
work without any overridden methods + testdb() + + ## xDelete + self.assertRaises(TypeError, vfs.xDelete, "bogus", "arguments") + TestVFS.xDelete = TestVFS.xDelete1 + err = [apsw.IOError, apsw.IOError][iswindows] + self.assertRaises(err, self.assertRaisesUnraisable, err, testdb) + TestVFS.xDelete = TestVFS.xDelete2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestVFS.xDelete = TestVFS.xDelete3 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, testdb) + TestVFS.xDelete = TestVFS.xDelete4 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestVFS.xDelete = TestVFS.xDelete99 + testdb() + + ## xAccess + self.assertRaises(TypeError, vfs.xAccess, "bogus", "arguments") + TestVFS.xAccess = TestVFS.xAccess1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestVFS.xAccess = TestVFS.xAccess2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, testdb) + TestVFS.xAccess = TestVFS.xAccess3 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestVFS.xAccess = TestVFS.xAccess4 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestVFS.xAccess = TestVFS.xAccess99 + if iswindows: + self.assertRaises(apsw.IOError, vfs.xAccess, u" 0) + + ## xSetSystemCall + fallback = apsw.VFS("fallback", base="") # undo any damage we do + try: + self.assertRaises(TypeError, vfs.xSetSystemCall) + self.assertRaises(TypeError, vfs.xSetSystemCall, 3, 4) + self.assertRaises((TypeError, ValueError), vfs.xSetSystemCall, "a\0b", 4) + self.assertRaises(TypeError, vfs.xSetSystemCall, "none", 3.7) + realopen = vfs.xGetSystemCall("open") + self.assertEqual(False, vfs.xSetSystemCall("doesn't exist", 0)) + self.assertEqual(True, vfs.xSetSystemCall("open", realopen + 1)) + self.assertEqual(realopen + 1, vfs.xGetSystemCall("open")) + self.assertEqual(True, vfs.xSetSystemCall("open", realopen)) + TestVFS.xSetSystemCall = TestVFS.xSetSystemCall1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, vfs2.xSetSystemCall, "open", + realopen) + TestVFS.xSetSystemCall = TestVFS.xSetSystemCall2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, vfs2.xSetSystemCall, + "open", realopen) + TestVFS.xSetSystemCall = TestVFS.xSetSystemCall3 + self.assertEqual(False, vfs2.xSetSystemCall("doesn't exist", 0)) + TestVFS.xSetSystemCall = TestVFS.xSetSystemCall99 + self.assertEqual(True, vfs2.xSetSystemCall("open", realopen)) + finally: + # undocumented - this resets all calls to their defaults + fallback.xSetSystemCall(None, 0) + fallback.unregister() + + ## + ## VFS file testing + ## + + ## init + TestVFS.xOpen = TestVFS.xOpen100 + + TestFile.__init__ = TestFile.init1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.__init__ = TestFile.init2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.__init__ = TestFile.init3 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.__init__ = TestFile.init4 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ValueError, testdb) + TestFile.__init__ = TestFile.init5 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, OverflowError, testdb) + TestFile.__init__ = TestFile.init6 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, OverflowError, testdb) + TestFile.__init__ = 
TestFile.init7 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.__init__ = TestFile.init8 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ValueError, testdb) + TestFile.__init__ = TestFile.init9 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.__init__ = TestFile.init10 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ValueError, testdb) + TestFile.__init__ = TestFile.init99 + testdb() # should work just fine + + # cause an open failure + self.assertRaises(apsw.CantOpenError, TestFile, ".", + [apsw.SQLITE_OPEN_MAIN_DB | apsw.SQLITE_OPEN_CREATE | apsw.SQLITE_OPEN_READWRITE, 0]) + + ## xRead + t = TestFile(os.path.abspath(TESTFILEPREFIX + "testfile"), + [apsw.SQLITE_OPEN_MAIN_DB | apsw.SQLITE_OPEN_CREATE | apsw.SQLITE_OPEN_READWRITE, 0]) + self.assertRaises(TypeError, t.xRead, "three", "four") + self.assertRaises(OverflowError, t.xRead, 0xffffffffeeeeeeee0, 1) + self.assertRaises(OverflowError, t.xRead, 1, 0xffffffffeeeeeeee0) + TestFile.xRead = TestFile.xRead1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xRead = TestFile.xRead2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, testdb) + TestFile.xRead = TestFile.xRead3 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xRead = TestFile.xRead4 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xRead = TestFile.xRead5 + self.assertRaises(apsw.IOError, self.assertMayRaiseUnraisable, TypeError, testdb) + TestFile.xRead = TestFile.xRead99 + testdb() + + ## xWrite + self.assertRaises(TypeError, t.xWrite, "three", "four") + self.assertRaises(OverflowError, t.xWrite, b"three", 0xffffffffeeeeeeee0) + self.assertRaises(TypeError, t.xWrite, u"foo", 0) + TestFile.xWrite = TestFile.xWrite1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xWrite = TestFile.xWrite2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, testdb) + TestFile.xWrite = TestFile.xWrite99 + testdb() + + ## xUnlock + self.assertRaises(TypeError, t.xUnlock, "three") + self.assertRaises(OverflowError, t.xUnlock, 0xffffffffeeeeeeee0) + # doesn't care about nonsensical levels - assert fails in debug build + # t.xUnlock(-1) + if not apsw.connection_hooks: + TestFile.xUnlock = TestFile.xUnlock1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xUnlock = TestFile.xUnlock2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, testdb) + TestFile.xUnlock = TestFile.xUnlock99 + testdb() + + ## xLock + self.assertRaises(TypeError, t.xLock, "three") + self.assertRaises(OverflowError, t.xLock, 0xffffffffeeeeeeee0) + # doesn't care about nonsensical levels - assert fails in debug build + # t.xLock(0xffffff) + TestFile.xLock = TestFile.xLock1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xLock = TestFile.xLock2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, testdb) + TestFile.xLock = TestFile.xLock99 + testdb() + + ## xTruncate + self.assertRaises(TypeError, t.xTruncate, "three") + self.assertRaises(OverflowError, t.xTruncate, 0xffffffffeeeeeeee0) + if not iswindows: + # windows is happy to truncate to -77 bytes + # see https://sqlite.org/cvstrac/tktview?tn=3415 + 
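+ # xTruncate is handed the new length in bytes; a negative size is
+ # nonsense that SQLite itself would never request, so the call below
+ # pokes the Python shim directly.  A minimal sketch of ordinary use
+ # (the file name is illustrative, not part of this test):
+ #
+ #   f = apsw.VFSFile("", "somefile",
+ #                    [apsw.SQLITE_OPEN_MAIN_DB | apsw.SQLITE_OPEN_CREATE | apsw.SQLITE_OPEN_READWRITE, 0])
+ #   f.xTruncate(0)   # shrink the file to zero bytes
+ #   f.xClose()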
self.assertRaises(apsw.IOError, t.xTruncate, -77) + TestFile.xTruncate = TestFile.xTruncate1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xTruncate = TestFile.xTruncate2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, testdb) + TestFile.xTruncate = TestFile.xTruncate99 + testdb() + + ## xSync + saved = apsw.connection_hooks + apsw.connection_hooks = [] + try: + self.assertRaises(TypeError, t.xSync, "three") + self.assertRaises(OverflowError, t.xSync, 0xffffffffeeeeeeee0) + TestFile.xSync = TestFile.xSync1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xSync = TestFile.xSync2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, testdb) + TestFile.xSync = TestFile.xSync99 + testdb() + finally: + apsw.connection_hooks = saved + + ## xSectorSize + self.assertRaises(TypeError, t.xSectorSize, 3) + TestFile.xSectorSize = TestFile.xSectorSize1 + self.assertRaisesUnraisable(TypeError, testdb) + TestFile.xSectorSize = TestFile.xSectorSize2 + self.assertRaisesUnraisable(ZeroDivisionError, testdb) + TestFile.xSectorSize = TestFile.xSectorSize3 + self.assertRaisesUnraisable(TypeError, testdb) + TestFile.xSectorSize = TestFile.xSectorSize4 + self.assertRaisesUnraisable(OverflowError, testdb) + TestFile.xSectorSize = TestFile.xSectorSize99 + testdb() + + ## xDeviceCharacteristics + self.assertRaises(TypeError, t.xDeviceCharacteristics, 3) + TestFile.xDeviceCharacteristics = TestFile.xDeviceCharacteristics1 + self.assertRaisesUnraisable(TypeError, testdb) + TestFile.xDeviceCharacteristics = TestFile.xDeviceCharacteristics2 + self.assertRaisesUnraisable(ZeroDivisionError, testdb) + TestFile.xDeviceCharacteristics = TestFile.xDeviceCharacteristics3 + self.assertRaisesUnraisable(TypeError, testdb) + TestFile.xDeviceCharacteristics = TestFile.xDeviceCharacteristics4 + self.assertRaisesUnraisable(OverflowError, testdb) + TestFile.xDeviceCharacteristics = TestFile.xDeviceCharacteristics99 + testdb() + + ## xFileSize + self.assertRaises(TypeError, t.xFileSize, 3) + TestFile.xFileSize = TestFile.xFileSize1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xFileSize = TestFile.xFileSize2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, testdb) + TestFile.xFileSize = TestFile.xFileSize3 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xFileSize = TestFile.xFileSize4 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, OverflowError, testdb) + TestFile.xFileSize = TestFile.xFileSize99 + testdb() + + ## xCheckReservedLock + self.assertRaises(TypeError, t.xCheckReservedLock, 8) + if not iswindows: + # we don't do checkreservedlock test on windows as the + # various files that need to be copied and finagled behind + # the scenes are locked + TestFile.xCheckReservedLock = TestFile.xCheckReservedLock1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xCheckReservedLock = TestFile.xCheckReservedLock2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, testdb) + TestFile.xCheckReservedLock = TestFile.xCheckReservedLock3 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, testdb) + TestFile.xCheckReservedLock = TestFile.xCheckReservedLock4 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, OverflowError, testdb) + 
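+ # A note on the nested assertion helpers used throughout this test: an
+ # exception raised inside a VFS method cannot propagate through SQLite's
+ # C call stack, so APSW reports it via the unraisable hook while SQLite
+ # surfaces a generic error.  A minimal sketch of the idea (ExplodingFile
+ # is illustrative and not part of this suite):
+ #
+ #   class ExplodingFile(apsw.VFSFile):
+ #       def xFileSize(self):
+ #           raise ZeroDivisionError   # delivered to the unraisable hook
+ #
+ #   # Opening a database served by ExplodingFile fails with an apsw
+ #   # error, while the ZeroDivisionError arrives separately at the
+ #   # unraisable handler - hence assertRaises wrapping
+ #   # assertRaisesUnraisable above.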
TestFile.xCheckReservedLock = TestFile.xCheckReservedLock99 + db = testdb() + + ## xFileControl + self.assertRaises(TypeError, t.xFileControl, "three", "four") + self.assertRaises(OverflowError, t.xFileControl, 10, 0xffffffffeeeeeeee0) + self.assertRaises(TypeError, t.xFileControl, 10, "three") + self.assertEqual(t.xFileControl(2000, 3000), False) + fc1 = testdb(TESTFILEPREFIX + "testdb", closedb=False).filecontrol + fc2 = testdb(TESTFILEPREFIX + "testdb2", closedb=False).filecontrol + TestFile.xFileControl = TestFile.xFileControl1 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, fc1, "main", 1027, 1027) + TestFile.xFileControl = TestFile.xFileControl2 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, ZeroDivisionError, fc2, "main", 1027, 1027) + TestFile.xFileControl = TestFile.xFileControl3 + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, fc2, "main", 1027, 1027) + TestFile.xFileControl = TestFile.xFileControl99 + del fc1 + del fc2 + # these should work + testdb(closedb=False).filecontrol("main", 1027, 1027) + if ctypes: + objwrap = ctypes.py_object(True) + testdb(closedb=False).filecontrol("main", 1028, ctypes.addressof(objwrap)) + # for coverage + class VFSx(apsw.VFS): + + def __init__(self): + apsw.VFS.__init__(self, "filecontrol", "apswtest") + + vfs2 = VFSx() + testdb(vfsname="filecontrol", closedb=False).filecontrol("main", 1027, 1027) + del vfs2 + + ## xClose + t.xClose() + # make sure there is no problem closing twice + t.xClose() + del t + gc.collect() + + t = apsw.VFSFile("", os.path.abspath(TESTFILEPREFIX + "testfile2"), + [apsw.SQLITE_OPEN_MAIN_DB | apsw.SQLITE_OPEN_CREATE | apsw.SQLITE_OPEN_READWRITE, 0]) + t.xClose() + # check all functions detect closed file + for n in dir(t): + if n not in ('xClose', 'excepthook') and not n.startswith("__"): + self.assertRaises(apsw.VFSFileClosedError, getattr(t, n)) + + def testWith(self): + "Context manager functionality" + + # Does it work? 
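+ # (Connection is a context manager: entry issues a SAVEPOINT, a clean
+ # exit releases it and an exceptional exit rolls back - the exectrace
+ # assertions later in this test check for exactly that SAVEPOINT
+ # traffic.)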
+ # the autocommit tests are to make sure we are not in a transaction + self.assertEqual(True, self.db.getautocommit()) + self.assertEqual(False, self.db.in_transaction) + self.assertTableNotExists("foo1") + with self.db as db: + db.cursor().execute('create table foo1(x)') + self.assertTableExists("foo1") + self.assertEqual(True, self.db.getautocommit()) + self.assertEqual(False, self.db.in_transaction) + + # with an error + self.assertEqual(True, self.db.getautocommit()) + self.assertEqual(False, self.db.in_transaction) + self.assertTableNotExists("foo2") + try: + with self.db as db: + db.cursor().execute('create table foo2(x)') + 1 / 0 + except ZeroDivisionError: + pass + self.assertTableNotExists("foo2") + self.assertEqual(True, self.db.getautocommit()) + + # nested - simple - success + with self.db as db: + self.assertEqual(False, self.db.getautocommit()) + self.assertEqual(True, self.db.in_transaction) + db.cursor().execute('create table foo2(x)') + with db as db2: + self.assertEqual(False, self.db.getautocommit()) + db.cursor().execute('create table foo3(x)') + with db2 as db3: + self.assertEqual(False, self.db.getautocommit()) + db.cursor().execute('create table foo4(x)') + self.assertEqual(True, self.db.getautocommit()) + self.assertTableExists("foo2") + self.assertTableExists("foo3") + self.assertTableExists("foo4") + + # nested - simple - failure + try: + self.db.cursor().execute('begin; create table foo5(x)') + with self.db as db: + self.assertEqual(False, self.db.getautocommit()) + db.cursor().execute('create table foo6(x)') + with db as db2: + self.assertEqual(False, self.db.getautocommit()) + db.cursor().execute('create table foo7(x)') + with db2 as db3: + self.assertEqual(False, self.db.getautocommit()) + db.cursor().execute('create table foo8(x)') + 1 / 0 + except ZeroDivisionError: + pass + self.assertEqual(False, self.db.getautocommit()) + self.db.cursor().execute("commit") + self.assertEqual(True, self.db.getautocommit()) + self.assertTableExists("foo5") + self.assertTableNotExists("foo6") + self.assertTableNotExists("foo7") + self.assertTableNotExists("foo8") + + # improve coverage and various corner cases + self.db.__enter__() + self.assertRaises(TypeError, self.db.__exit__, 1) + for i in range(10): + self.db.__exit__(None, None, None) + + # make an exit fail + self.db.__enter__() + self.db.cursor().execute("commit") + # deliberately futz with the outstanding transaction + self.assertRaises(apsw.SQLError, self.db.__exit__, None, None, None) + self.db.__exit__(None, None, None) # extra exit should be harmless + + # exectracing + traces = [] + + def et(con, sql, bindings): + if con == self.db: + traces.append(sql) + return True + + self.db.setexectrace(et) + try: + with self.db as db: + db.cursor().execute('create table foo2(x)') + except apsw.SQLError: # table already exists so we should get an error + pass + + # check we saw the right things in the traces + self.assertTrue(len(traces) == 3) + for s in traces: + self.assertTrue("SAVEPOINT" in s.upper()) + + def et(*args): + return BadIsTrue() + + self.db.setexectrace(et) + try: + with self.db as db: + db.cursor().execute('create table etfoo2(x)') + except ZeroDivisionError: + pass + self.assertTableNotExists("etfoo2") + + def et(*args): + return False + + self.db.setexectrace(et) + try: + with self.db as db: + db.cursor().execute('create table etfoo2(x)') + except apsw.ExecTraceAbort: + pass + self.db.setexectrace(None) + self.assertTableNotExists("etfoo2") + + # test blobs with context manager + 
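+ # blobopen's arguments below are database name, table, column, rowid
+ # and a writeable flag (0 means read-only).  The returned blob is
+ # itself a context manager and is closed on exit, after which further
+ # reads raise ValueError.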
self.db.cursor().execute("create table blobby(x); insert into blobby values(x'aabbccddee')") + rowid = self.db.last_insert_rowid() + blob = self.db.blobopen('main', 'blobby', 'x', rowid, 0) + with blob as b: + self.assertEqual(id(blob), id(b)) + b.read(1) + # blob gives ValueError if you do operations on closed blob + self.assertRaises(ValueError, blob.read) + + self.db.cursor().execute("insert into blobby values(x'aabbccddee')") + rowid = self.db.last_insert_rowid() + blob = self.db.blobopen('main', 'blobby', 'x', rowid, 0) + try: + with blob as b: + self.assertEqual(id(blob), id(b)) + 1 / 0 + b.read(1) + except ZeroDivisionError: + # blob gives ValueError if you do operating on closed blob + self.assertRaises(ValueError, blob.read) + + # backup code + if not hasattr(self.db, "backup"): return # experimental + db2 = apsw.Connection(":memory:") + with db2.backup("main", self.db, "main") as b: + while not b.done: + b.step(1) + self.assertEqual(b.done, True) + self.assertDbIdentical(self.db, db2) + + def fillWithRandomStuff(self, db, seed=1): + "Fills a database with random content" + db.cursor().execute("create table a(x)") + for i in range(1, 11): + db.cursor().execute("insert into a values(?)", + ("aaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" * i * 8192, )) + + def assertDbIdentical(self, db1, db2): + "Ensures databases are identical" + c1 = db1.cursor() + c2 = db2.cursor() + self.assertEqual(list(c1.execute("select * from sqlite_master order by _ROWID_")), + list(c2.execute("select * from sqlite_master order by _ROWID_"))) + for table in db1.cursor().execute("select name from sqlite_master where type='table'"): + table = table[0] + self.assertEqual( + list(c1.execute("select * from [%s] order by _ROWID_" % (table, ))), + list(c2.execute("select * from [%s] order by _ROWID_" % (table, ))), + ) + for table in db2.cursor().execute("select name from sqlite_master where type='table'"): + table = table[0] + self.assertEqual( + list(c1.execute("select * from [%s] order by _ROWID_" % (table, ))), + list(c2.execute("select * from [%s] order by _ROWID_" % (table, ))), + ) + + def testBackup(self): + "Verify hot backup functionality" + # bad calls + self.assertRaises(TypeError, self.db.backup, "main", "main", "main", "main") + self.assertRaises(TypeError, self.db.backup, "main", 3, "main") + db2 = apsw.Connection(":memory:") + db2.close() + self.assertRaises(ValueError, self.db.backup, "main", db2, "main") + # can't copy self + self.assertRaises(ValueError, self.db.backup, "main", self.db, "it doesn't care what is here") + + # try and get inuse error + dbt = apsw.Connection(":memory:") + vals = {"stop": False, "raised": False} + + def wt(): + # worker thread spins grabbing and releasing inuse flag + while not vals["stop"]: + try: + dbt.setbusytimeout(100) + except apsw.ThreadingViolationError: + # this means main thread grabbed inuse first + pass + + t = ThreadRunner(wt) + t.start() + b4 = time.time() + # try to get inuse error for 30 seconds + try: + try: + while not vals["stop"] and time.time() - b4 < 30: + self.db.backup("main", dbt, "main").close() + except apsw.ThreadingViolationError: + vals["stop"] = True + vals["raised"] = True + finally: + vals["stop"] = True + + # standard usage + db2 = apsw.Connection(":memory:") + self.fillWithRandomStuff(db2) + + b = self.db.backup("main", db2, "main") + self.assertRaises(TypeError, b.step, '3') + try: + b.step(1) + self.assertTrue(b.remaining > 0) + self.assertTrue(b.pagecount > 0) + while not b.done: + b.step(1) + finally: + b.finish() + 
self.assertDbIdentical(self.db, db2) + self.db.cursor().execute("drop table a") + + # don't clean up + b = self.db.backup("main", db2, "main") + try: + while not b.done: + b.step(1) + finally: + b.finish() + + self.assertDbIdentical(self.db, db2) + del b + del db2 + fname = self.db.filename + self.db = None + gc.collect() + + # check dest db can't be used for anything else + db2 = apsw.Connection(":memory:") + c = db2.cursor() + c.execute("create table x(y); insert into x values(3); select * from x") + self.db = apsw.Connection(":memory:") + self.fillWithRandomStuff(self.db) + self.assertRaises(apsw.ThreadingViolationError, db2.backup, "main", self.db, "main") + c.close() + b = db2.backup("main", self.db, "main") + # double check cursor really is dead + self.assertRaises(apsw.CursorClosedError, c.execute, "select 3") + # with the backup object existing, all operations on db2 should fail + self.assertRaises(apsw.ThreadingViolationError, db2.cursor) + # finish and then trying to step + b.finish() + self.assertRaises(apsw.ConnectionClosedError, b.step) + + # make step and finish fail with locked error + self.db = apsw.Connection(fname) + + def lockerr(): + db2 = apsw.Connection(self.db.filename) + db2.cursor().execute("begin exclusive") + db3 = apsw.Connection(self.db.filename) + b = db3.backup("main", self.db, "main") + # if step gets busy then so does finish, but step has to be called at least once + self.assertRaises(apsw.BusyError, b.step) + return b + + b = lockerr() + b.close(True) + del b + b = lockerr() + self.assertRaises(apsw.BusyError, b.close, False) + del b + + b = lockerr() + self.assertRaises(apsw.BusyError, b.finish) + b.finish() # should be ok the second time + del b + + b = lockerr() + self.assertRaises(TypeError, b.close, "3") + self.assertRaises(apsw.BusyError, b.close, False) + b.close() # should also be ok + del b + + def f(): + b = lockerr() + del b + gc.collect() + + self.assertRaisesUnraisable(apsw.BusyError, f) + + # coverage + b = lockerr() + self.assertRaises(TypeError, b.__exit__, 3) + self.assertRaises(apsw.BusyError, b.__exit__, None, None, None) + b.__exit__(None, None, None) + + def testLog(self): + "Verifies logging functions" + self.assertRaises(TypeError, apsw.log) + self.assertRaises(TypeError, apsw.log, 1) + self.assertRaises(TypeError, apsw.log, 1, 2) + self.assertRaises(TypeError, apsw.log, 1, 2, 3) + self.assertRaises(TypeError, apsw.log, 1, None) + apsw.log(apsw.SQLITE_MISUSE, "Hello world") # nothing should happen + self.assertRaises(TypeError, apsw.config, apsw.SQLITE_CONFIG_LOG, 2) + self.assertRaises(TypeError, apsw.config, apsw.SQLITE_CONFIG_LOG) + # Can't change once SQLite is initialised + self.assertRaises(apsw.MisuseError, apsw.config, apsw.SQLITE_CONFIG_LOG, None) + # shutdown + self.db = None + gc.collect() + apsw.shutdown() + try: + apsw.config(apsw.SQLITE_CONFIG_LOG, None) + apsw.log(apsw.SQLITE_MISUSE, "Hello world") + called = [0] + + def handler(code, message, called=called): + called[0] += 1 + self.assertEqual(code, apsw.SQLITE_MISUSE) + self.assertEqual(message, u"a \u1234 unicode ' \ufe54 string \u0089") + + apsw.config(apsw.SQLITE_CONFIG_LOG, handler) + apsw.log(apsw.SQLITE_MISUSE, u"a \u1234 unicode ' \ufe54 string \u0089") + self.assertEqual(called[0], 1) + + def badhandler(code, message, called=called): + called[0] += 1 + self.assertEqual(code, apsw.SQLITE_NOMEM) + self.assertEqual(message, u"Xa \u1234 unicode ' \ufe54 string \u0089") + 1 / 0 + + apsw.config(apsw.SQLITE_CONFIG_LOG, badhandler) + 
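+ # A minimal sketch of the logging hookup being tested (as above,
+ # SQLITE_CONFIG_LOG can only be changed while SQLite is shut down):
+ #
+ #   import apsw
+ #
+ #   def log_handler(errcode, message):
+ #       print("SQLITE_LOG:", errcode, message)
+ #
+ #   apsw.config(apsw.SQLITE_CONFIG_LOG, log_handler)
+ #   apsw.log(apsw.SQLITE_MISUSE, "demo")   # log_handler is invoked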
self.assertRaisesUnraisable(ZeroDivisionError, apsw.log, apsw.SQLITE_NOMEM, + u"Xa \u1234 unicode ' \ufe54 string \u0089") + self.assertEqual(called[0], 2) + finally: + gc.collect() + apsw.shutdown() + apsw.config(apsw.SQLITE_CONFIG_LOG, None) + + def testReadonly(self): + "Check Connection.readonly()" + self.assertEqual(self.db.readonly("main"), False) + c = apsw.Connection(TESTFILEPREFIX + "testdb", flags=apsw.SQLITE_OPEN_READONLY) + self.assertEqual(c.readonly("main"), True) + self.assertRaises(apsw.SQLError, self.db.readonly, "sdfsd") + + class foo: + + def __str__(self): + 1 / 0 + + self.assertRaises(TypeError, self.db.readonly, foo()) + + def testFilename(self): + "Check connections and filenames" + self.assertTrue(self.db.filename.endswith("testdb")) + self.assertTrue(os.sep in self.db.filename) + self.assertEqual(self.db.filename, self.db.db_filename("main")) + self.db.cursor().execute("attach '%s' as foo" % (TESTFILEPREFIX + "testdb2", )) + self.assertEqual(self.db.filename + "2", self.db.db_filename("foo")) + + def testShell(self, shellclass=None): + "Check Shell functionality" + if shellclass is None: + shellclass = apsw.shell.Shell + + fh = [open(TESTFILEPREFIX + "test-shell-" + t, "w+", encoding="utf8") for t in ("in", "out", "err")] + kwargs = {"stdin": fh[0], "stdout": fh[1], "stderr": fh[2]} + + def reset(): + for i in fh: + i.truncate(0) + i.seek(0) + + def isempty(x): + self.assertEqual(get(x), "") + + def isnotempty(x): + self.assertNotEqual(len(get(x)), 0) + + def cmd(c): + assert fh[0].tell() == 0 + fh[0].truncate(0) + fh[0].seek(0) + fh[0].write(c) + fh[0].seek(0) + + def get(x): + x.seek(0) + return x.read() + + # Make one + shellclass(stdin=fh[0], stdout=fh[1], stderr=fh[2]) + + # Let's give it some harmless SQL arguments and do a sanity check + s = shellclass(args=[TESTFILEPREFIX + "testdb", "create table x(x)", "insert into x values(1)"], **kwargs) + self.assertTrue(s.db.filename.endswith("testdb")) + # do a dump and check our table is there with its values + s.command_dump([]) + self.assertTrue("x(x)" in get(fh[1])) + self.assertTrue("(1);" in get(fh[1])) + + # empty args + self.assertEqual((None, [], []), s.process_args(None)) + + # input description + reset() + write_whole_file(TESTFILEPREFIX + "test-shell-1", "wt", "syntax error") + try: + shellclass(args=[TESTFILEPREFIX + "testdb", ".read %stest-shell-1" % (TESTFILEPREFIX, )], **kwargs) + except shellclass.Error: + self.assertTrue("test-shell-1" in get(fh[2])) + isempty(fh[1]) + + # Check single and double dash behave the same + reset() + try: + shellclass(args=["-init"], **kwargs) + except shellclass.Error: + isempty(fh[1]) + self.assertTrue("specify a filename" in get(fh[2])) + + reset() + s = shellclass(**kwargs) + try: + s.process_args(["--init"]) + except shellclass.Error: + self.assertTrue("specify a filename" in str(sys.exc_info()[1])) + + # various command line options + # an invalid one + reset() + try: + shellclass(args=["---tripledash"], **kwargs) + except shellclass.Error: + isempty(fh[1]) + self.assertTrue("-tripledash" in get(fh[2])) + self.assertTrue("--tripledash" not in get(fh[2])) + + ### + ### --init + ### + reset() + write_whole_file(TESTFILEPREFIX + "test-shell-1", "wt", "syntax error") + try: + shellclass(args=["-init", TESTFILEPREFIX + "test-shell-1"], **kwargs) + except shellclass.Error: + # we want to make sure it read the file + isempty(fh[1]) + self.assertTrue("syntax error" in get(fh[2])) + reset() + write_whole_file(TESTFILEPREFIX + "test-shell-1", "wt", "select 3;") + 
shellclass(args=["-init", TESTFILEPREFIX + "test-shell-1"], **kwargs) + # we want to make sure it read the file + isempty(fh[2]) + self.assertTrue("3" in get(fh[1])) + + ### + ### --header + ### + reset() + s = shellclass(**kwargs) + s.process_args(["--header"]) + self.assertEqual(s.header, True) + s.process_args(["--noheader"]) + self.assertEqual(s.header, False) + s.process_args(["--noheader", "-header", "-noheader", "--header"]) + self.assertEqual(s.header, True) + # did they actually turn on? + isempty(fh[1]) + isempty(fh[2]) + s.process_args([TESTFILEPREFIX + "testdb", ".mode column", "select 3"]) + isempty(fh[2]) + self.assertTrue("3" in get(fh[1])) + self.assertTrue("----" in get(fh[1])) + + ### + ### --echo, --bail, --interactive + ### + reset() + for v in ("echo", "bail", "interactive"): + s = shellclass(**kwargs) + b4 = getattr(s, v) + s.process_args(["--" + v]) + # setting should have changed + self.assertNotEqual(b4, getattr(s, v)) + isempty(fh[1]) + isempty(fh[2]) + + ### + ### --batch + ### + reset() + s = shellclass(**kwargs) + s.interactive = True + s.process_args(["-batch"]) + self.assertEqual(s.interactive, False) + isempty(fh[1]) + isempty(fh[2]) + + ### + ### --separator, --nullvalue, --encoding + ### + for v, val in ("separator", "\n"), ("nullvalue", "abcdef"), ("encoding", "iso8859-1"): + reset() + s = shellclass(args=["--" + v, val], **kwargs) + # We need the eval because shell processes backslashes in + # string. After deliberating that is the right thing to + # do + if v == "encoding": + self.assertEqual((val, None), getattr(s, v)) + else: + self.assertEqual(val, getattr(s, v)) + isempty(fh[1]) + isempty(fh[2]) + self.assertRaises(shellclass.Error, shellclass, args=["-" + v, val, "--" + v], **kwargs) + isempty(fh[1]) + self.assertTrue(v in get(fh[2])) + + ### + ### --version + ### + reset() + self.assertRaises(SystemExit, shellclass, args=["--version"], **kwargs) + # it writes to stdout + isempty(fh[2]) + self.assertTrue(apsw.sqlitelibversion() in get(fh[1])) + + ### + ### --help + ### + reset() + self.assertRaises(SystemExit, shellclass, args=["--help"], **kwargs) + # it writes to stderr + isempty(fh[1]) + self.assertTrue("-version" in get(fh[2])) + + ### + ### Items that correspond to output mode + ### + reset() + shellclass(args=[ + "--python", "--column", "--python", ":memory:", "create table x(x)", "insert into x values(x'aa')", + "select * from x;" + ], + **kwargs) + isempty(fh[2]) + self.assertTrue('b"' in get(fh[1]) or "buffer(" in get(fh[1])) + + ### + ### Is process_unknown_args called as documented? 
+ ### + reset() + + class s2(shellclass): + + def process_unknown_args(self, args): + 1 / 0 + + self.assertRaises(ZeroDivisionError, s2, args=["--unknown"], **kwargs) + isempty(fh[1]) + self.assertTrue("division" in get(fh[2])) # py2 says "integer division", py3 says "int division" + + class s3(shellclass): + + def process_unknown_args(_, args): + self.assertEqual(args[0:2], ["myoption", "myvalue"]) + return args[2:] + + reset() + self.assertRaises(s3.Error, s3, args=["--python", "--myoption", "myvalue", "--init"], **kwargs) + isempty(fh[1]) + self.assertTrue("-init" in get(fh[2])) + + ### + ### .open + #### + reset() + s = shellclass(**kwargs) + self.assertTrue(s.db.filename == "") + for n in "testdb", "testdb2", "testdb3": + fn = TESTFILEPREFIX + n + reset() + cmd(".open " + fn) + s.cmdloop() + self.assertTrue(s.db.filename.endswith(fn)) + reset() + fn = TESTFILEPREFIX + "testdb" + cmd(".open " + fn) + cmd("create table foo(x); insert into foo values(2);") + s.cmdloop() + for row in s.db.cursor().execute("select * from foo"): + break + else: + self.fail("Table doesn't have any rows") + reset() + cmd(".open --new " + fn) + s.cmdloop() + for row in s.db.cursor().execute("select * from sqlite_master"): + self.fail("--new didn't wipe file") + + ### + ### Some test data + ### + reset() + s = shellclass(**kwargs) + s.cmdloop() + + def testnasty(): + reset() + # py 3 barfs with any codepoints above 0xffff whining + # about surrogates not being allowed. If only it + # implemented unicode properly. + cmd(u"create table if not exists nastydata(x,y); insert into nastydata values(null,'xxx\\u1234\\uabcdyyy\r\n\t\"this is nasty\u0001stuff!');" + ) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + reset() + cmd(".bail on\n.header OFF\nselect * from nastydata;") + s.cmdloop() + isempty(fh[2]) + isnotempty(fh[1]) + + ### + ### Output formats - column + ### + reset() + x = 'a' * 20 + cmd(".mode column\n.header ON\nselect '" + x + "';") + s.cmdloop() + isempty(fh[2]) + # colwidth should be 2 more + sep = '-' * (len(x) + 2) # apostrophes quoting string in column header + out = get(fh[1]).replace("\n", "") + self.assertEqual(len(out.split(sep)), 2) + self.assertEqual(len(out.split(sep)[0]), len(x) + 2) # plus two apostrophes + self.assertEqual(len(out.split(sep)[1]), len(x) + 2) # same + self.assertTrue(" " in out.split(sep)[1]) # space padding + # make sure truncation happens + reset() + cmd(".width 5\nselect '" + x + "';\n") + s.cmdloop() + isempty(fh[2]) + self.assertTrue("a" * 6 not in get(fh[1])) + # right justification + reset() + cmd(".header off\n.width -3 -3\nselect 3,3;\n.width 3 3\nselect 3,3;") + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]) + self.assertTrue(v.startswith(" 3 3")) + v = v.split("\n") + self.assertNotEqual(v[0], v[1]) + self.assertEqual(len(v[0]), len(v[1])) + # do not output blob as is + self.assertTrue(u"\xaa" not in get(fh[1])) + # undo explain + reset() + cmd(".explain OFF\n") + s.cmdloop() + testnasty() + + ### + ### Output formats - csv + ### + reset() + # mode change should reset separator + cmd(".separator F\n.mode csv\nselect 3,3;\n") + s.cmdloop() + isempty(fh[2]) + self.assertTrue("3,3" in get(fh[1])) + # tab sep + reset() + cmd(".separator '\\t'\nselect 3,3;\n") + s.cmdloop() + isempty(fh[2]) + self.assertTrue("3\t3" in get(fh[1])) + # back to comma + reset() + cmd(".mode csv\nselect 3,3;\n") + s.cmdloop() + isempty(fh[2]) + self.assertTrue("3,3" in get(fh[1])) + # quoting + reset() + cmd(".header ON\nselect 3 as [\"one\"], 4 as [\t];\n") + s.cmdloop() + 
isempty(fh[2]) + self.assertTrue('"""one""",\t' in get(fh[1])) + # custom sep + reset() + cmd(".separator |\nselect 3 as [\"one\"], 4 as [\t];\n") + s.cmdloop() + isempty(fh[2]) + self.assertTrue("3|4\n" in get(fh[1])) + self.assertTrue('"one"|\t\n' in get(fh[1])) + # testnasty() - csv module is pretty much broken + + ### + ### Output formats - html + ### + reset() + cmd(".mode html\n.header OFF\nselect 3,4;\n") + s.cmdloop() + isempty(fh[2]) + # should be no header + self.assertTrue("<th>" not in get(fh[1]).lower()) + # does it actually work? + self.assertTrue("<td>3</td>" in get(fh[1]).lower()) + # check quoting works + reset() + cmd(".header ON\nselect 3 as [<>&];\n") + s.cmdloop() + isempty(fh[2]) + self.assertTrue("&lt;&gt;&amp;" in get(fh[1]).lower()) + # do we output rows? + self.assertTrue("<tr>" in get(fh[1]).lower()) + self.assertTrue("</tr>" in get(fh[1]).lower()) + testnasty() + + ### + ### Output formats - insert + ### + reset() + all = "3,3.1,'3.11',null,x'0311'" + cmd(".mode insert\n.header OFF\nselect " + all + ";\n") + s.cmdloop() + isempty(fh[2]) + self.assertTrue(all in get(fh[1]).lower()) + # empty values + reset() + all = "0,0.0,'',null,x''" + cmd("select " + all + ";\n") + s.cmdloop() + isempty(fh[2]) + self.assertTrue(all in get(fh[1]).lower()) + # header, separator and nullvalue should make no difference + save = get(fh[1]) + reset() + cmd(".header ON\n.separator %\n.nullvalue +\nselect " + all + ";\n") + s.cmdloop() + isempty(fh[2]) + self.assertEqual(save, get(fh[1])) + # check the table name + self.assertTrue(get(fh[1]).lower().startswith('insert into "table" values')) + reset() + cmd(".mode insert funkychicken\nselect " + all + ";\n") + s.cmdloop() + isempty(fh[2]) + self.assertTrue(get(fh[1]).lower().startswith("insert into funkychicken values")) + testnasty() + + ### + ### Output formats - json + ### + reset() + all = "3,2.2,'string',null,x'0311'" + cmd(".mode json\n.header ON\n select " + all + ";") + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]).strip() + v = v[:-1] # remove trailing comma + havejson = False + try: + import json + havejson = True + except ImportError: + try: + import simplejson as json + havejson = True + except ImportError: + pass + if havejson: + out = json.loads(v) + self.assertEqual(out, {"3": 3, "2.2": 2.2, "'string'": "string", "null": None, "x'0311'": "AxE="}) + # a regular table + reset() + cmd("create table jsontest([int], [float], [string], [null], [blob]);insert into jsontest values(" + all + + ");select * from jsontest;") + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]).strip()[:-1] + if havejson: + out = json.loads(v) + self.assertEqual(out, {"int": 3, "float": 2.2, "string": "string", "null": None, "blob": "AxE="}) + testnasty() + + ### + ### Output formats - line + ### + reset() + cmd(".header OFF\n.nullvalue *\n.mode line\nselect 3 as a, null as b, 0.0 as c, 'a' as d, x'aa' as e;\n") + s.cmdloop() + isempty(fh[2]) + out = get(fh[1]).replace(" ", "") + self.assertTrue("a=3\n" in out) + self.assertTrue("b=*\n" in out) + self.assertTrue("c=0.0\n" in out) + self.assertTrue("d=a\n" in out) + self.assertTrue("e=\n" in out) + self.assertEqual(7, len(out.split("\n"))) # one for each col plus two trailing newlines + # header should make no difference + reset() + cmd(".header ON\n.nullvalue *\n.mode line\nselect 3 as a, null as b, 0.0 as c, 'a' as d, x'aa' as e;\n") + s.cmdloop() + isempty(fh[2]) + self.assertEqual(out, get(fh[1]).replace(" ", "")) + # wide column name + reset() + ln = "kjsfhgjksfdjkgfhkjsdlafgjkhsdkjahfkjdsajfhsdja" * 12 + cmd("select 3 as %s, 3 as 
%s1;" % (ln, ln)) + s.cmdloop() + isempty(fh[2]) + self.assertEqual(get(fh[1]), " %s = 3\n%s1 = 3\n\n" % (ln, ln)) + testnasty() + + ### + ### Output formats - list + ### + reset() + cmd(".header off\n.mode list\n.nullvalue (\n.separator &\nselect 3 as a, null as b, 0.0 as c, 'a' as d, x'aa' as e;\n" + ) + s.cmdloop() + isempty(fh[2]) + self.assertEqual(get(fh[1]), '3&(&0.0&a&\n') + reset() + # header on + cmd(".header on\n.mode list\n.nullvalue (\n.separator &\nselect 3 as a, null as b, 0.0 as c, 'a' as d, x'aa' as e;\n" + ) + s.cmdloop() + isempty(fh[2]) + self.assertTrue(get(fh[1]).startswith("a&b&c&d&e\n")) + testnasty() + + ### + ### Output formats - python + ### + reset() + cmd(".header off\n.mode python\nselect 3 as a, null as b, 0.0 as c, 'a' as d, x'aa44bb' as e;\n") + s.cmdloop() + isempty(fh[2]) + v = eval(get(fh[1])) + self.assertEqual(len(v), 1) # 1 tuple + self.assertEqual(v, ((3, None, 0.0, 'a', b"\xaa\x44\xbb"), )) + reset() + cmd(".header on\n.mode python\nselect 3 as a, null as b, 0.0 as c, 'a' as d, x'aa44bb' as e;\n") + s.cmdloop() + isempty(fh[2]) + v = eval("(" + get(fh[1]) + ")") # need parentheses otherwise indent rules apply + self.assertEqual(len(v), 2) # headers and row + self.assertEqual(v, ( + ("a", "b", "c", "d", "e"), + (3, None, 0.0, 'a', b"\xaa\x44\xbb"), + )) + testnasty() + + ### + ### Output formats - TCL + ### + reset() + cmd(".header off\n.mode tcl\n.separator -\n.nullvalue ?\nselect 3 as a, null as b, 0.0 as c, 'a' as d, x'aa44bb' as e;\n" + ) + s.cmdloop() + isempty(fh[2]) + self.assertEqual(get(fh[1]), '"3"-"?"-"0.0"-"a"-"\\xAAD\\xBB"\n') + reset() + cmd(".header on\nselect 3 as a, null as b, 0.0 as c, 'a' as d, x'aa44bb' as e;\n") + s.cmdloop() + isempty(fh[2]) + self.assertTrue('"a"-"b"-"c"-"d"-"e"' in get(fh[1])) + testnasty() + + # What happens if db cannot be opened? + s.process_args(args=["/"]) + reset() + cmd("select * from sqlite_master;\n.bail on\nselect 3;\n") + self.assertRaises(apsw.CantOpenError, s.cmdloop) + isempty(fh[1]) + self.assertTrue("unable to open database file" in get(fh[2])) + + # echo testing - multiple statements + s.process_args([":memory:"]) # back to memory db + reset() + cmd(".bail off\n.echo on\nselect 3;\n") + s.cmdloop() + self.assertTrue("select 3;\n" in get(fh[2])) + # multiline + reset() + cmd("select 3;select 4;\n") + s.cmdloop() + self.assertTrue("select 3;\n" in get(fh[2])) + self.assertTrue("select 4;\n" in get(fh[2])) + # multiline with error + reset() + cmd("select 3;select error;select 4;\n") + s.cmdloop() + # worked line should be present + self.assertTrue("select 3;\n" in get(fh[2])) + # as should the error + self.assertTrue("no such column: error" in get(fh[2])) + # is timing info output correctly? 
+ reset() + timersupported = False + try: + cmd(".bail on\n.echo off\n.timer on\n.timer off\n") + s.cmdloop() + timersupported = True + except s.Error: + pass + + if timersupported: + reset() + # create something that should take some time to execute + s.db.cursor().execute("create table xyz(x); begin;") + s.db.cursor().executemany("insert into xyz values(?)", randomintegers(4000)) + s.db.cursor().execute("end") + reset() + # this takes .6 seconds on my machine so we should + # definitely have non-zero timing information + cmd(".timer ON\nselect max(x),min(x),max(x+x),min(x-x) from xyz union select x+max(x),x-min(x),3,4 from xyz union select x,x,x,x from xyz union select x,x,x,x from xyz;select 3;\n" + ) + s.cmdloop() + isnotempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd(".bail off\n.timer off") + s.cmdloop() + + # command handling + reset() + cmd(".nonexist 'unclosed") + s.cmdloop() + isempty(fh[1]) + self.assertTrue("no closing quotation" in get(fh[2]).lower()) + reset() + cmd(".notexist ") + s.cmdloop() + isempty(fh[1]) + self.assertTrue('Unknown command "notexist"' in get(fh[2])) + + ### + ### Commands - backup and restore + ### + + reset() + cmd(".backup with too many parameters") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd(".backup ") # too few + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd(".restore with too many parameters") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd(".restore ") # too few + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + # bogus filenames + for i in ('/', '"main" /'): + for c in (".backup ", ".restore "): + reset() + cmd(c + i) + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + + def randomtable(cur, dbname=None): + name = list("abcdefghijklmnopqrstuvwxtz") + random.shuffle(name) + name = "".join(name) + fullname = name + if dbname: + fullname = dbname + "." + fullname + cur.execute("begin;create table %s(x)" % (fullname, )) + cur.executemany("insert into %s values(?)" % (fullname, ), randomintegers(400)) + cur.execute("end") + return name + + # Straight forward backup. The gc.collect() is needed because + # non-gc cursors hanging around will prevent the backup from + # happening. + n = randomtable(s.db.cursor()) + contents = s.db.cursor().execute("select * from " + n).fetchall() + reset() + cmd(".backup %stestdb2" % (TESTFILEPREFIX, )) + gc.collect() + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + reset() + cmd("drop table " + n + ";") + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + self.assertTrue(os.path.isfile("%stestdb2" % (TESTFILEPREFIX, ))) + reset() + cmd(".restore %stestdb2" % (TESTFILEPREFIX, )) + gc.collect() + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + newcontents = s.db.cursor().execute("select * from " + n).fetchall() + # no guarantee of result order + contents.sort() + newcontents.sort() + self.assertEqual(contents, newcontents) + + # do they pay attention to the dbname + s.db.cursor().execute("attach ':memory:' as memdb") + n = randomtable(s.db.cursor(), "memdb") + contents = s.db.cursor().execute("select * from memdb." + n).fetchall() + reset() + gc.collect() + cmd(".backup memdb %stestdb2" % (TESTFILEPREFIX, )) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + s.db.cursor().execute("detach memdb; attach ':memory:' as memdb2") + reset() + gc.collect() + cmd(".restore memdb2 %stestdb2" % (TESTFILEPREFIX, )) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + newcontents = s.db.cursor().execute("select * from memdb2." 
+ n).fetchall() + # no guarantee of result order + contents.sort() + newcontents.sort() + self.assertEqual(contents, newcontents) + + ### + ### Commands - bail + ### + reset() + cmd(".bail") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd(".bail on\n.mode list\nselect 3;\nselect error;\nselect 4;\n") + self.assertRaises(apsw.Error, s.cmdloop) + self.assertTrue("3" in get(fh[1])) + self.assertTrue("4" not in get(fh[1])) + reset() + cmd(".bail oFf\n.mode list\nselect 3;\nselect error;\nselect 4;\n") + s.cmdloop() + self.assertTrue("3" in get(fh[1])) + self.assertTrue("4" in get(fh[1])) + + ### + ### Commands - databases + ### + reset() + cmd(".databases foo") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + # clean things up + s = shellclass(**kwargs) + reset() + cmd(".header oFF\n.databases") + s.cmdloop() + isempty(fh[2]) + for i in "main", "name", "file": + self.assertTrue(i in get(fh[1])) + reset() + cmd("attach '%stestdb' as quack;\n.databases" % (TESTFILEPREFIX, )) + s.cmdloop() + isempty(fh[2]) + for i in "main", "name", "file", "testdb", "quack": + self.assertTrue(i in get(fh[1])) + reset() + cmd("detach quack;") + s.cmdloop() + isempty(fh[2]) + for i in "testdb", "quack": + self.assertTrue(i not in get(fh[1])) + + ### + ### Commands - dump + ### + reset() + cmd("create table foo(x); create table bar(x);\n.dump foox") + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + reset() + cmd(".dump foo") + s.cmdloop() + isempty(fh[2]) + for i in "foo", "create table", "begin", "commit": + self.assertTrue(i in get(fh[1]).lower()) + self.assertTrue("bar" not in get(fh[1]).lower()) + # can we do virtual tables? + reset() + if self.checkOptionalExtension("fts3", "create virtual table foo using fts3()"): + reset() + cmd("CREATE virtual TaBlE fts3 using fts3(colA FRED , colB JOHN DOE);\n" + "insert into fts3 values('one', 'two');insert into fts3 values('onee', 'two');\n" + "insert into fts3 values('one', 'two two two');") + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + reset() + cmd(".dump") + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]) + for i in "pragma writable_schema", "create virtual table fts3", "cola fred", "colb john doe": + self.assertTrue(i in v.lower()) + # analyze + reset() + cmd("drop table bar;create table bar(x unique,y);create index barf on bar(x,y);create index barff on bar(y);insert into bar values(3,4);\nanalyze;\n.dump bar" + ) + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]) + for i in "analyze bar", "create index barf": + self.assertTrue(i in v.lower()) + self.assertTrue("autoindex" not in v.lower()) # created by sqlite to do unique constraint + self.assertTrue("sqlite_sequence" not in v.lower()) # not autoincrements + # repeat but all tables + reset() + cmd(".dump") + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]) + for i in "analyze bar", "create index barf": + self.assertTrue(i in v.lower()) + self.assertTrue("autoindex" not in v.lower()) # created by sqlite to do unique constraint + # foreign keys + reset() + cmd("create table xxx(z references bar(x));\n.dump") + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]) + for i in "foreign_keys", "references": + self.assertTrue(i in v.lower()) + # views + reset() + cmd("create view noddy as select * from foo;\n.dump noddy") + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]) + for i in "drop view", "create view noddy": + self.assertTrue(i in v.lower()) + # issue82 - view ordering + reset() + cmd("create table issue82(x);create view issue82_2 as select * from issue82; create view issue82_1 as select count(*) from 
issue82_2;\n.dump issue82%" + ) + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]) + s.db.cursor().execute("drop table issue82 ; drop view issue82_1 ; drop view issue82_2") + reset() + cmd(v) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + # autoincrement + reset() + cmd("create table abc(x INTEGER PRIMARY KEY AUTOINCREMENT); insert into abc values(null);insert into abc values(null);\n.dump" + ) + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]) + for i in "sqlite_sequence", "'abc', 2": + self.assertTrue(i in v.lower()) + # user version + self.assertTrue("user_version" not in v) + reset() + cmd("pragma user_version=27;\n.dump") + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]) + self.assertTrue("pragma user_version=27;" in v) + s.db.cursor().execute("pragma user_version=0") + # some nasty stuff + reset() + cmd(u"create table nastydata(x,y); insert into nastydata values(null,'xxx\\u1234\\uabcd\\U00012345yyy\r\n\t\"this is nasty\u0001stuff!');" + 'create table "table"([except] int); create table [](""); create table [using]("&");') + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + reset() + cmd(".dump") + s.cmdloop() + isempty(fh[2]) + v = get(fh[1]) + self.assertTrue("nasty" in v) + self.assertTrue("stuff" in v) + # sanity check the dumps + reset() + cmd(v) # should run just fine + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + # drop all the tables we made to do another dump and compare with before + for t in "abc", "bar", "foo", "fts3", "xxx", "noddy", "sqlite_sequence", "sqlite_stat1", \ + "issue82", "issue82_1", "issue82_2": + reset() + cmd("drop table %s;drop view %s;" % (t, t)) + s.cmdloop() # there will be errors which we ignore + reset() + cmd(v) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + # another dump + reset() + cmd(".dump") + s.cmdloop() + isempty(fh[2]) + v2 = get(fh[1]) + v = re.sub("-- Date:.*", "", v) + v2 = re.sub("-- Date:.*", "", v2) + self.assertEqual(v, v2) + # clean database + reset() + s = shellclass(args=[':memory:'], **kwargs) + cmd(v) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + reset() + cmd(v2 + "\n.dump") + s.cmdloop() + isempty(fh[2]) + v3 = get(fh[1]) + v3 = re.sub("-- Date:.*", "", v3) + self.assertEqual(v, v3) + # trailing comments + reset() + cmd("""create table xxblah(b -- ff +) -- xx +; create index xxfoo on xxblah(b -- ff +) -- xx +; create view xxbar as select * from xxblah -- ff +; +insert into xxblah values(3); +.dump +""") + s.cmdloop() + isempty(fh[2]) + dump = get(fh[1]) + reset() + cmd("drop table xxblah; drop view xxbar;") + s.cmdloop() + isempty(fh[2]) + isempty(fh[1]) + reset() + cmd(dump) + s.cmdloop() + isempty(fh[2]) + isempty(fh[1]) + self.assertEqual(s.db.cursor().execute("select * from xxbar").fetchall(), [(3, )]) + # check index + reset() + cmd("drop index xxfoo;") + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + + ### + ### Command - echo + ### + reset() + cmd(".echo") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd(".echo bananas") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd(".echo on on") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd(".echo off\nselect 3;") + s.cmdloop() + self.assertTrue("3" in get(fh[1])) + self.assertTrue("select 3" not in get(fh[2])) + reset() + cmd(".echo on\nselect 3;") + s.cmdloop() + self.assertTrue("3" in get(fh[1])) + self.assertTrue("select 3" in get(fh[2])) + # more complex testing is done earlier including multiple statements and errors + + ### + ### Command - encoding + ### + self.suppressWarning("ResourceWarning") + for i in 
".encoding one two", ".encoding", ".encoding utf8 another": + reset() + cmd(i) + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd(".encoding this-does-not-exist") + s.cmdloop() + isempty(fh[1]) + self.assertTrue("no known encoding" in get(fh[2]).lower()) + # use iso8859-1 to make sure data is read correctly - it + # differs from utf8 + us = u"unitestdata \xaa\x89 34" + write_whole_file(TESTFILEPREFIX + "test-shell-1", + "w", + f"insert into enctest values('{ us }');\n", + encoding="iso8859-1") + gc.collect() + reset() + cmd(".encoding iso8859-1\ncreate table enctest(x);\n.echo on\n.read %stest-shell-1\n.echo off" % + (TESTFILEPREFIX, )) + s.cmdloop() + self.assertEqual(s.db.cursor().execute("select * from enctest").fetchall()[0][0], us) + self.assertTrue(us in get(fh[2])) + reset() + write_whole_file(TESTFILEPREFIX + "test-shell-1", "w", us + "\n", encoding="iso8859-1") + cmd("drop table enctest;create table enctest(x);\n.import %stest-shell-1 enctest" % (TESTFILEPREFIX, )) + s.cmdloop() + isempty(fh[2]) + isempty(fh[1]) + self.assertEqual(s.db.cursor().execute("select * from enctest").fetchall()[0][0], us) + reset() + cmd(".output %stest-shell-1\n.mode list\nselect * from enctest;" % (TESTFILEPREFIX, )) + s.cmdloop() + self.assertEqual( + read_whole_file(TESTFILEPREFIX + "test-shell-1", "rb").strip(), # skip eol + us.encode("iso8859-1")) + reset() + cmd(".output stdout\nselect '%s';\n" % (us, )) + s.cmdloop() + isempty(fh[2]) + self.assertTrue(us in get(fh[1])) + + ### encoding specifying error handling - see issue 108 + reset() + cmd(".encoding utf8:replace") + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + # non-existent error + reset() + cmd(".encoding cp437:blahblah") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + self.assertTrue("blahblah" in get(fh[2])) + # check replace works + reset() + us = u"\N{BLACK STAR}8\N{WHITE STAR}" + write_whole_file(TESTFILEPREFIX + "test-shell-1", + "w", + f"insert into enctest values('{ us }');", + encoding="utf8") + cmd(".encoding utf8\n.read %stest-shell-1\n.encoding cp437:replace\n.output %stest-shell-1\nselect * from enctest;\n.encoding utf8\n.output stdout" + % (TESTFILEPREFIX, TESTFILEPREFIX)) + s.cmdloop() + isempty(fh[2]) + isempty(fh[1]) + self.assertTrue("?8?" 
in read_whole_file(TESTFILEPREFIX + "test-shell-1", "rt", "cp437")) + + ### + ### Command - exceptions + ### + reset() + cmd("syntax error;") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + self.assertTrue(len(get(fh[2]).split("\n")) < 5) + reset() + cmd(".exceptions on\nsyntax error;") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + self.assertTrue(len(get(fh[2]).split("\n")) > 10) + self.assertTrue("sql = " in get(fh[2])) + # deliberately leave exceptions on + + ### + ### Command - exit & quit + ### + for i in ".exit", ".quit": + reset() + cmd(i) + self.assertRaises(SystemExit, s.cmdloop) + isempty(fh[1]) + isempty(fh[2]) + reset() + cmd(i + " jjgflk") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + + ### + ### Command explain and header are tested above + ### + # pass + + ### + ### Command find + ### + reset() + cmd(".find one two three") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd("create table findtest([x\" x],y); insert into findtest values(3, 'xx3'); insert into findtest values(34, 'abcd');" + ) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + reset() + cmd(".find 3") + s.cmdloop() + isempty(fh[2]) + for text, present in (("findtest", True), ("xx3", True), ("34", False)): + if present: + self.assertTrue(text in get(fh[1])) + else: + self.assertTrue(text not in get(fh[1])) + reset() + cmd(".find does-not-exist") + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + reset() + cmd(".find ab_d") + s.cmdloop() + isempty(fh[2]) + for text, present in (("findtest", True), ("xx3", False), ("34", True)): + if present: + self.assertTrue(text in get(fh[1])) + else: + self.assertTrue(text not in get(fh[1])) + reset() + cmd(".find 3 table-not-exist") + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + + ### + ### Command help + ### + reset() + cmd(".help\n.help all\n.help import backup") + s.cmdloop() + isempty(fh[1]) + for i in ".import", "Reads data from the file": + self.assertTrue(i in get(fh[2])) + reset() + cmd(".help backup notexist import") + s.cmdloop() + isempty(fh[1]) + for i in "Copies the contents", "No such command": + self.assertTrue(i in get(fh[2])) + # screw up terminal width + origtw = s._terminal_width + + def tw(*args): + return 7 + + s._terminal_width = tw + reset() + cmd(".bail on\n.help all\n.bail off") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + + ### + ### Command - import + ### + # check it fundamentally works + reset() + cmd(".encoding utf16\ncreate table imptest(x real, y char);\n" + "insert into imptest values(3.1, 'xabc');\n" + "insert into imptest values(3.2, 'xabfff\"ffffc');\n" + ".output %stest-shell-1\n.mode csv\nselect * from imptest;\n" + ".output stdout" % (TESTFILEPREFIX, )) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + # make sure encoding took + self.assertTrue(b"xab" not in read_whole_file(TESTFILEPREFIX + "test-shell-1", "rb")) + data = s.db.cursor().execute("select * from imptest; delete from imptest").fetchall() + self.assertEqual(2, len(data)) + reset() + cmd(".import %stest-shell-1 imptest" % (TESTFILEPREFIX, )) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + newdata = s.db.cursor().execute("select * from imptest; drop table imptest").fetchall() + data.sort() + newdata.sort() + self.assertEqual(data, newdata) + # error handling + for i in ".import", ".import one", ".import one two three", ".import nosuchfile nosuchtable", ".import nosuchfile sqlite_master": + reset() + cmd(i) + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + # wrong number of columns + reset() + cmd("create table 
imptest(x,y);\n.mode tabs\n.output %stest-shell-1\nselect 3,4;select 5,6;select 7,8,9;" % + (TESTFILEPREFIX, )) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + reset() + cmd(".output stdout\n.import %stest-shell-1 imptest" % (TESTFILEPREFIX, )) + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + # check it was done in a transaction and aborted + self.assertEqual(0, s.db.cursor().execute("select count(*) from imptest").fetchall()[0][0]) + + ### + ### Command - autoimport + ### + + # errors + for i in ".autoimport", ".autoimport 1 2 3", ".autoimport nosuchfile", ".autoimport %stest-shell-1 sqlite_master" % ( + TESTFILEPREFIX, ): + reset() + cmd(i) + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + + # check correct detection with each type of separator and that types are not mangled + c = s.db.cursor() + for row in ( + ('a,b', '21/1/20', '00'), + (' ', '1/1/20', 10), + ('a"b', '1/1/01', '00'), + ('+40', '01123', '2010 100 15'), + ('2010//10//13', '2010/10/13 12', 2), + ('2010/13/13 12:13', '13/13/2010 12:93', '13/2010/13'), + ("+3", " 3", 3), + ("03.03", "03.03.20", "03"), + ( + (None, 2, 5.5), + (None, 4, 99), + ), + ): + + c.execute("""drop table if exists aitest ; create table aitest("x y", ["], "3d")""") + if isinstance(row[0], tuple): + f = c.executemany + else: + f = c.execute + f("insert into aitest values(?,?,?)", row) + fname = TESTFILEPREFIX + "test-shell-1" + for sep in "\t", "|", ",", "X": + reset() + cmd(".mode csv\n.headers on\n.output %stest-shell-1\n.separator \"%s\"\nselect * from aitest;\n.output stdout\n.separator X\ndrop table if exists \"test-shell-1\";\n.autoimport %stest-shell-1" + % (TESTFILEPREFIX, sep, TESTFILEPREFIX)) + s.cmdloop() + isnotempty(fh[1]) + isempty(fh[2]) + self.assertTablesEqual(s.db, "aitest", s.db, "test-shell-1") + + # Change encoding back to sensible + reset() + cmd(".encoding utf8") + s.cmdloop() + + # Check date detection + for expect, fmt, sequences in (("1999-10-13", "%d-%d:%d", ( + (1999, 10, 13), + (13, 10, 1999), + (10, 13, 1999), + )), ("1999-10-13T12:14:17", "%d/%d/%d/%d/%d/%d", ( + (1999, 10, 13, 12, 14, 17), + (13, 10, 1999, 12, 14, 17), + (10, 13, 1999, 12, 14, 17), + )), ("1999-10-13T12:14:00", "%dX%dX%dX%dX%d", ( + (1999, 10, 13, 12, 14), + (13, 10, 1999, 12, 14), + (10, 13, 1999, 12, 14), + ))): + for seq in sequences: + write_whole_file(TESTFILEPREFIX + "test-shell-1", "wt", ("a,b\nrow," + (fmt % seq) + "\n")) + reset() + cmd("drop table [test-shell-1];\n.autoimport %stest-shell-1" % (TESTFILEPREFIX, )) + s.cmdloop() + isempty(fh[2]) + imp = c.execute("select b from [test-shell-1] where a='row'").fetchall()[0][0] + self.assertEqual(imp, expect) + + # Check diagnostics when unable to import + for err, content in ( + ("current encoding", b"\x81\x82\x83\tfoo\n\x84\x97\xff\tbar"), + ("known type", "abcdef\nhiojklmnop\n"), + ("more than one", 'ab,c\tdef\nqr,dd\t\n'), + ("ambiguous data format", "a,b\n1/1/2001,3\n2001/4/4,4\n"), + ): + if isinstance(content, bytes): + continue + write_whole_file(TESTFILEPREFIX + "test-shell-1", "wt", content) + reset() + cmd("drop table [test-shell-1];\n.autoimport %stest-shell-1" % (TESTFILEPREFIX, )) + s.cmdloop() + errmsg = get(fh[2]) + self.assertTrue(err in errmsg) + + ### + ### Command - indices + ### + for i in ".indices", ".indices one two": + reset() + cmd(i) + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd("create table indices(x unique, y unique); create index shouldseethis on indices(x,y);") + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + reset() + 
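+ # .indices lists every index on the named table, including the
+ # automatic ones SQLite creates for unique constraints - which is why
+ # both "shouldseethis" and "autoindex" are expected in the output
+ # below.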
cmd(".indices indices") + s.cmdloop() + isempty(fh[2]) + for i in "shouldseethis", "autoindex": + self.assertTrue(i in get(fh[1])) + + ### + ### Command - load + ### + if hasattr(APSW, "testLoadExtension"): + lf = LOADEXTENSIONFILENAME + for i in ".load", ".load one two three": + reset() + cmd(i) + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + reset() + cmd(".load nosuchfile") + s.cmdloop() + isempty(fh[1]) + self.assertTrue("nosuchfile" in get(fh[2]) or "ExtensionLoadingError" in get(fh[2])) + reset() + cmd(".mode list\n.load " + lf + " alternate_sqlite3_extension_init\nselect doubleup(2);") + s.cmdloop() + isempty(fh[2]) + self.assertTrue("4" in get(fh[1])) + reset() + cmd(".mode list\n.load " + lf + "\nselect half(2);") + s.cmdloop() + isempty(fh[2]) + self.assertTrue("1" in get(fh[1])) + + ### + ### Command - mode + ### + # already thoroughly tested in code above + for i in ".mode", ".mode foo more", ".mode invalid": + reset() + cmd(i) + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + + ### + ### command nullvalue & separator + ### + # already tested in code above + for i in ".nullvalue", ".nullvalue jkhkl lkjkj", ".separator", ".separator one two": + reset() + cmd(i) + b4 = s.nullvalue, s.separator + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + self.assertEqual(b4, (s.nullvalue, s.separator)) + + ### + ### command output + ### + for i in ".output", ".output too many args", ".output " + os.sep: + reset() + cmd(i) + b4 = s.stdout + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + self.assertEqual(b4, s.stdout) + + ### + ### Command prompt + ### + # not much to test until pty testing is working + for i in ".prompt", ".prompt too many args": + reset() + cmd(i) + b4 = s.prompt, s.moreprompt + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + self.assertEqual(b4, (s.prompt, s.moreprompt)) + + ### + ### Command read + ### + # pretty much thoroughly tested above + write_whole_file(TESTFILEPREFIX + "test-shell-1.py", "wt", """ +assert apsw +assert shell +shell.write(shell.stdout, "hello world\\n") +""") + for i in ".read", ".read one two", ".read " + os.sep: + reset() + cmd(i) + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + + reset() + cmd(".read %stest-shell-1.py" % (TESTFILEPREFIX, )) + s.cmdloop() + isempty(fh[2]) + self.assertTrue("hello world" in get(fh[1])) + + # restore tested with backup + + ### + ### Command - schema + ### + # make sure it works + reset() + cmd(".schema") + s.cmdloop() + isempty(fh[2]) + isnotempty(fh[1]) + reset() + cmd("create table schematest(x);create index unrelatedname on schematest(x);\n.schema schematest foo notexist foo" + ) + s.cmdloop() + isempty(fh[2]) + for i in "schematest", "unrelatedname": + self.assertTrue(i in get(fh[1])) + + # separator done earlier + + ### + ### Command - show + ### + # set all settings to known values + resetcmd = ".echo off\n.explain off\n.headers off\n.mode list\n.nullvalue ''\n.output stdout\n.separator |\n.width 1 2 3\n.exceptions off" + reset() + cmd(resetcmd) + s.cmdloop() + isempty(fh[2]) + isempty(fh[1]) + reset() + cmd(".show") + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + baseline = get(fh[2]) + for i in ".echo on", ".explain", ".headers on", ".mode column", ".nullvalue T", ".separator %", ".width 8 9 1", ".exceptions on": + reset() + cmd(resetcmd) + s.cmdloop() + isempty(fh[1]) + if not get(fh[2]).startswith(".echo off"): + isempty(fh[2]) + reset() + cmd(i + "\n.show") + s.cmdloop() + isempty(fh[1]) + # check size has not changed much + self.assertTrue(abs(len(get(fh[2])) - len(baseline)) < 
14) + + # output + reset() + cmd(".output %stest-shell-1\n.show" % (TESTFILEPREFIX, )) + s.cmdloop() + isempty(fh[1]) + self.assertTrue("output: " + TESTFILEPREFIX + "test-shell-1" in get(fh[2])) + reset() + cmd(".output stdout\n.show") + s.cmdloop() + isempty(fh[1]) + self.assertTrue("output: stdout" in get(fh[2])) + self.assertTrue(not os.path.exists("stdout")) + # errors + reset() + cmd(".show one two") + s.cmdloop() + isempty(fh[1]) + self.assertTrue("at most one parameter" in get(fh[2])) + reset() + cmd(".show notexist") + s.cmdloop() + isempty(fh[1]) + self.assertTrue("notexist: " not in get(fh[2])) + + ### + ### Command tables + ### + reset() + cmd(".tables") + s.cmdloop() + isempty(fh[2]) + isnotempty(fh[1]) + reset() + cmd("create table tabletest(x);create index tabletest1 on tabletest(x);create index noway on tabletest(x);\n.tables tabletest\n.tables" + ) + s.cmdloop() + isempty(fh[2]) + self.assertTrue("tabletest" in get(fh[1])) + self.assertTrue("tabletest1" not in get(fh[1])) + self.assertTrue("noway" not in get(fh[1])) + + ### + ### Command timeout + ### + for i in (".timeout", ".timeout ksdjfh", ".timeout 6576 78987"): + reset() + cmd(i) + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + for i in (".timeout 1000", ".timeout 0", ".timeout -33"): + reset() + cmd(i) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + + # timer is tested earlier + + ### + ### Command width + ### + # does it work? + reset() + cmd(".width 10 10 10 0") + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + + def getw(): + reset() + cmd(".show width") + s.cmdloop() + isempty(fh[1]) + return [int(x) for x in get(fh[2]).split()[1:]] + + self.assertEqual([10, 10, 10, 0], getw()) + # some errors + for i in ".width", ".width foo", ".width 1 2 3 seven 3": + reset() + cmd(i) + s.cmdloop() + isempty(fh[1]) + isnotempty(fh[2]) + self.assertEqual([10, 10, 10, 0], getw()) + for i, r in ("9 0 9", [9, 0, 9]), ("10 -3 10 -3", [10, -3, 10, -3]), ("0", [0]): + reset() + cmd(".width " + i) + s.cmdloop() + isempty(fh[1]) + isempty(fh[2]) + self.assertEqual(r, getw()) + + ### + ### Unicode output with all output modes + ### + colname = u"\N{BLACK STAR}8\N{WHITE STAR}" + val = u'xxx\u1234\uabcdyyy this\" is nasty\u0001stuff!' 
+ noheadermodes = ('insert', ) + # possible ways val can be represented (eg csv doubles up double quotes) + outputs = (val, val.replace('"', '""'), val.replace('"', '"'), val.replace('"', '\\"')) + for mode in [x[len("output_"):] for x in dir(shellclass) if x.startswith("output_")]: + reset() + cmd(".separator |\n.width 999\n.encoding utf8\n.header on\n.mode %s\nselect '%s' as '%s';" % + (mode, val, colname)) + s.cmdloop() + isempty(fh[2]) + # modes too complicated to construct the correct string + if mode in ('python', 'tcl'): + continue + # all others + if mode not in noheadermodes: + self.assertTrue(colname in get(fh[1])) + cnt = 0 + for o in outputs: + cnt += o in get(fh[1]) + self.assertTrue(cnt) + + # clean up files + for f in fh: + f.close() + + # This one uses the coverage module + def _testShellWithCoverage(self): + "Check Shell functionality (with coverage)" + # We currently allow coverage module to not exist which helps + # with debugging + try: + import coverage + except ImportError: + coverage = None + + import importlib.util + # I had problems with the compiled bytecode being around + for suff in "c", "o": + try: + os.remove("apsw/shell.py" + suff) + except: + pass + + spec = importlib.util.spec_from_file_location("shell_coverage", "apsw/shell.py") + module = importlib.util.module_from_spec(spec) + sys.modules[module.__name__] = module + if coverage: coverage.start() + spec.loader.exec_module(module) + try: + self._originaltestShell(shellclass=module.Shell) + finally: + if coverage: + coverage.stop() + coverage.annotate(morfs=[module]) + os.rename("apsw/shell.py,cover", "shell.py.gcov") + + # Note that faults fire only once, so there is no need to reset + # them. The testing for objects bigger than 2GB is done in + # testLargeObjects + def testzzFaultInjection(self): + "Deliberately inject faults to exercise all code paths" + if not hasattr(apsw, "faultdict"): + return + + # Verify we test all fault locations + code = [] + for fn in glob.glob("*/*.c"): + with open(fn, encoding="utf8") as f: + code.append(f.read()) + code = "\n".join(code) + + with open(__file__, "rt", encoding="utf8") as f: + test_code = f.read() + + seen = set() + + for macro, faultname in re.findall(r"(APSW_FAULT_INJECT|GET_BUFFER|STRING_NEW)\s*[(]\s*(?P.*?)\s*,", + code): + if faultname == "faultName": + continue + if faultname not in test_code and not faultname.startswith("BackupDependent"): + raise Exception(f"Fault injected { faultname } not found in tests.py") + if faultname in seen: + raise Exception(f"Fault { faultname } seen multiple times") + seen.add(faultname) + + def dummy(*args): + 1 / 0 + + def dummy2(*args): + return 7 + + # The 1/0 in these tests is to cause a ZeroDivisionError so + # that an exception is always thrown. If we catch that then + # it means earlier expected exceptions were not thrown. 
+ + ## UnknownSQLiteErrorCode + apsw.faultdict["UnknownSQLiteErrorCode"] = True + try: + self.db.cursor().execute("select '") + 1 / 0 + except: + klass, value = sys.exc_info()[:2] + self.assertTrue(klass is apsw.Error) + self.assertTrue("254" in str(value)) + + ## ConnectionCloseFail + if "APSW_NO_MEMLEAK" not in os.environ: + apsw.faultdict["ConnectionCloseFail"] = True + try: + db = apsw.Connection(":memory:") + db.cursor().execute("select 3") + db.close(True) + 1 / 0 + except apsw.IOError: + pass + + ## ConnectionCloseFail in destructor + if "APSW_NO_MEMLEAK" not in os.environ: + # test + apsw.faultdict["ConnectionCloseFail"] = True + + def f(): + db = apsw.Connection(":memory:") + db.cursor().execute("select 3") + del db + gc.collect() + + self.assertRaisesUnraisable(apsw.ConnectionNotClosedError, f) + + ## BlobAllocFails + apsw.faultdict["BlobAllocFails"] = True + try: + db = apsw.Connection(":memory:") + db.cursor().execute("create table foo(ablob); insert into foo (ROWID, ablob) values (1,x'aabbccddeeff')") + blob = db.blobopen("main", "foo", "ablob", 1, False) + 1 / 0 + except MemoryError: + pass + + ## CursorAllocFails + apsw.faultdict["CursorAllocFails"] = True + try: + db = apsw.Connection(":memory:") + db.cursor().execute("select 3") + 1 / 0 + except MemoryError: + pass + + ## DBConfigFails + apsw.faultdict["DBConfigFails"] = True + try: + db = apsw.Connection(":memory:") + db.config(apsw.SQLITE_DBCONFIG_ENABLE_TRIGGER, -1) + 1 / 0 + except apsw.NoMemError: + pass + + ## RollbackHookExistingError + apsw.faultdict["RollbackHookExistingError"] = True + try: + db = apsw.Connection(":memory:") + db.setrollbackhook(dummy) + db.cursor().execute("create table foo(a); begin ; insert into foo values(3); rollback") + 1 / 0 + except MemoryError: + pass + + ## CommitHookExceptionAlready + apsw.faultdict["CommitHookExistingError"] = True + try: + db = apsw.Connection(":memory:") + db.setcommithook(dummy) + db.cursor().execute("begin; create table foo(a); insert into foo values(3); commit") + 1 / 0 + except MemoryError: + pass + + ## AuthorizerExistingError + apsw.faultdict["AuthorizerExistingError"] = True + try: + db = apsw.Connection(":memory:") + db.setauthorizer(dummy) + db.cursor().execute("create table foo(a)") + 1 / 0 + except MemoryError: + pass + + ## SetAuthorizerFail + apsw.faultdict["SetAuthorizerFail"] = True + try: + db = apsw.Connection(":memory:") + db.setauthorizer(dummy) + 1 / 0 + except: + pass + + apsw.faultdict["SetAuthorizerFail"] = True + try: + db = apsw.Connection(":memory:") + db.authorizer = None + 1 / 0 + except: + pass + + ## CollationNeededNullFail + apsw.faultdict["CollationNeededNullFail"] = True + try: + db = apsw.Connection(":memory:") + db.collationneeded(None) + 1 / 0 + except apsw.IOError: + klass, value = sys.exc_info()[:2] + self.assertTrue(klass is apsw.IOError) + + ## CollationNeededFail + apsw.faultdict["CollationNeededFail"] = True + try: + db = apsw.Connection(":memory:") + db.collationneeded(dummy) + 1 / 0 + except: + klass, value = sys.exc_info()[:2] + self.assertTrue(klass is apsw.IOError) + + ##EnableLoadExtensionFail + apsw.faultdict["EnableLoadExtensionFail"] = True + try: + db = apsw.Connection(":memory:") + db.enableloadextension(True) + 1 / 0 + except: + pass + + ## SetBusyHandlerNullFail + apsw.faultdict["SetBusyHandlerNullFail"] = True + try: + db = apsw.Connection(":memory:") + db.setbusyhandler(None) + 1 / 0 + except apsw.IOError: + pass + + ## SetBusyHandlerFail + apsw.faultdict["SetBusyHandlerFail"] = True + try: + db = 
apsw.Connection(":memory:") + db.setbusyhandler(dummy) + 1 / 0 + except apsw.IOError: + pass + + ## UnknownValueType + apsw.faultdict["UnknownValueType"] = True + try: + db = apsw.Connection(":memory:") + db.createscalarfunction("dummy", dummy) + db.cursor().execute("select dummy(4)") + 1 / 0 + except: + klass, value = sys.exc_info()[:2] + self.assertTrue(klass is apsw.Error) + self.assertTrue("123456" in str(value)) + + ## UnknownColumnType + apsw.faultdict["UnknownColumnType"] = True + try: + db = apsw.Connection(":memory:") + for row in db.cursor().execute("select 3"): + pass + 1 / 0 + except: + klass, value = sys.exc_info()[:2] + self.assertTrue(klass is apsw.Error) + self.assertTrue("12348" in str(value)) + + ## SetContextResultUnicodeConversionFails + apsw.faultdict["SetContextResultUnicodeConversionFails"] = True + try: + db = apsw.Connection(":memory:") + db.createscalarfunction("foo", lambda x: u"another unicode string") + for row in db.cursor().execute("select foo(3)"): + pass + 1 / 0 + except MemoryError: + pass + + ## SetContextResultAsReadBufferFail + apsw.faultdict["SetContextResultAsReadBufferFail"] = True + try: + db = apsw.Connection(":memory:") + db.createscalarfunction("foo", lambda x: b"another string") + for row in db.cursor().execute("select foo(3)"): + pass + 1 / 0 + except MemoryError: + pass + + ## GFAPyTuple_NewFail + apsw.faultdict["GFAPyTuple_NewFail"] = True + try: + db = apsw.Connection(":memory:") + db.createscalarfunction("foo", dummy) + for row in db.cursor().execute("select foo(3)"): + pass + 1 / 0 + except MemoryError: + pass + + ## Same again + apsw.faultdict["GFAPyTuple_NewFail"] = True + try: + db = apsw.Connection(":memory:") + + def foo(): + return None, dummy2, dummy2 + + db.createaggregatefunction("foo", foo) + for row in db.cursor().execute("create table bar(x);insert into bar values(3); select foo(x) from bar"): + pass + 1 / 0 + except MemoryError: + pass + + ## AutovacuumPagesFails + apsw.faultdict["AutovacuumPagesFails"] = True + self.assertRaises(apsw.NoMemError, self.db.autovacuum_pages, lambda x: x) + + ## CBDispatchExistingError + apsw.faultdict["CBDispatchExistingError"] = True + try: + db = apsw.Connection(":memory:") + db.createscalarfunction("foo", dummy) + db.cursor().execute("select foo(3)") + 1 / 0 + except MemoryError: + pass + + ## CBDispatchFinalError + apsw.faultdict["CBDispatchFinalError"] = True + try: + + def f(): + db = apsw.Connection(":memory:") + + def foo(): + return None, dummy, dummy2 + + db.createaggregatefunction("foo", foo) + for row in db.cursor().execute("create table bar(x);insert into bar values(3); select foo(x) from bar"): + pass + 1 / 0 + + self.assertRaisesUnraisable(Exception, f) + except ZeroDivisionError: + pass + + ## DeserializeMallocFail + apsw.faultdict["DeserializeMallocFail"] = True + self.assertRaises(MemoryError, self.db.deserialize, "main", b"aaaaaa") + + ## Virtual table code + class Source: + + def Create(self, *args): + return "create table foo(x,y)", Table() + + Connect = Create + + class Table: + + def __init__(self): + self.data = [ #("rowid", "x", "y"), + [0, 1, 2], [3, 4, 5] + ] + + def Open(self): + return Cursor(self) + + def BestIndex(self, *args): + return None + + def UpdateChangeRow(self, rowid, newrowid, fields): + for i, row in enumerate(self.data): + if row[0] == rowid: + self.data[i] = [newrowid] + list(fields) + + def FindFunction(self, *args): + return lambda *args: 1 + + class Cursor: + + def __init__(self, table): + self.table = table + self.row = 0 + + def Eof(self): + 
return self.row >= len(self.table.data) + + def Rowid(self): + return self.table.data[self.row][0] + + def Column(self, col): + return self.table.data[self.row][1 + col] + + def Filter(self, *args): + self.row = 0 + + def Next(self): + self.row += 1 + + def Close(self): + pass + + ## VtabCreateBadString + apsw.faultdict["VtabCreateBadString"] = True + try: + db = apsw.Connection(":memory:") + db.createmodule("nonsense", None) + db.cursor().execute("create virtual table foo using nonsense(3,4)") + 1 / 0 + except MemoryError: + pass + + ## VtabUpdateChangeRowFail + apsw.faultdict["VtabUpdateChangeRowFail"] = True + try: + db = apsw.Connection(":memory:") + db.createmodule("foo", Source()) + db.cursor().execute("create virtual table foo using foo();update foo set x=3 where y=2") + 1 / 0 + except MemoryError: + pass + + ## VtabUpdateBadField + apsw.faultdict["VtabUpdateBadField"] = True + try: + db = apsw.Connection(":memory:") + db.createmodule("foo", Source()) + db.cursor().execute("create virtual table foo using foo();update foo set x=3 where y=2") + 1 / 0 + except MemoryError: + pass + + ## VtabRenameBadName + apsw.faultdict["VtabRenameBadName"] = True + try: + db = apsw.Connection(":memory:") + db.createmodule("foo", Source()) + db.cursor().execute("create virtual table foo using foo(); alter table foo rename to bar") + 1 / 0 + except MemoryError: + pass + + ## VtabRenameBadName + apsw.faultdict["CreateModuleFail"] = True + try: + db = apsw.Connection(":memory:") + db.createmodule("foo", Source()) + 1 / 0 + except apsw.IOError: + pass + + ## FindFunctionAllocFailed + apsw.faultdict["FindFunctionAllocFailed"] = True + try: + db = apsw.Connection(":memory:") + db.overloadfunction("xyz", 2) + db.createmodule("foo", Source()) + db.cursor().execute("create virtual table foo using foo()") + db.cursor().execute("select xyz(x,y) from foo") + 1 / 0 + except MemoryError: + pass + + ## BlobDeallocException + def f(): + db = apsw.Connection(":memory:") + db.cursor().execute("create table foo(b);insert into foo(rowid,b) values(2,x'aabbccddee')") + blob = db.blobopen("main", "foo", "b", 2, False) # open read-only + # deliberately cause problem + try: + blob.write(b'a') + except apsw.ReadOnlyError: + pass + # garbage collect + del blob + gc.collect() + + self.assertRaisesUnraisable(apsw.ReadOnlyError, f) + + ## GetDescriptionFail + apsw.faultdict["GetDescriptionFail"] = True + try: + db = apsw.Connection(":memory:") + c = db.cursor() + c.execute("create table foo(b);insert into foo(rowid,b) values(2,x'aabbccddee');select * from foo") + c.getdescription() + 1 / 0 + except MemoryError: + pass + + ## DoBindingUnicodeConversionFails + apsw.faultdict["DoBindingUnicodeConversionFails"] = True + try: + db = apsw.Connection(":memory:") + db.cursor().execute("select ?", (u"abc", )) + 1 / 0 + except MemoryError: + pass + + ## DoBindingAsReadBufferFails + apsw.faultdict["DoBindingAsReadBufferFails"] = True + try: + db = apsw.Connection(":memory:") + db.cursor().execute("select ?", (b"abcd", )) + 1 / 0 + except MemoryError: + pass + + ## DoExecTraceBadSlice + apsw.faultdict["DoExecTraceBadSlice"] = True + try: + db = apsw.Connection(":memory:") + c = db.cursor() + c.setexectrace(dummy) + c.execute("select ?; select ?; select ?", (1, 2, 3)) + 1 / 0 + except MemoryError: + pass + + ## EnableSharedCacheFail + apsw.faultdict["EnableSharedCacheFail"] = True + try: + apsw.enablesharedcache(True) + 1 / 0 + except apsw.NoMemError: + pass + + ## InitializeFail + apsw.faultdict["InitializeFail"] = True + try: + 
apsw.initialize() + 1 / 0 + except apsw.NoMemError: + pass + + ## ShutdownFail + apsw.faultdict["ShutdownFail"] = True + try: + apsw.shutdown() + 1 / 0 + except apsw.NoMemError: + pass + + ### statement cache stuff + for key in ("SCStatsBuildFail", "SCStatsListFail", "SCStatsEntryBuildFail", "SCStatsAppendFail", "SCStatsEntriesSetFail"): + # this ensures stuff is in statement cache + self.db.execute("Select ?", (key,)).fetchall() + apsw.faultdict[key] = True + self.assertRaises(MemoryError, self.db.cache_stats, True) + + ### vfs routines + + class FaultVFS(apsw.VFS): + + def __init__(self, name="faultvfs", inherit="", makedefault=False): + super(FaultVFS, self).__init__(name, inherit, makedefault=makedefault) + + def xGetLastErrorLong(self): + return "a" * 1024, None + + def xOpen(self, name, flags): + return FaultVFSFile(name, flags) + + class FaultVFSFile(apsw.VFSFile): + + def __init__(self, name, flags): + super(FaultVFSFile, self).__init__("", name, flags) + + vfs = FaultVFS() + + ## xFullPathnameConversion + apsw.faultdict["xFullPathnameConversion"] = True + self.assertRaises(apsw.SQLError, + self.assertRaisesUnraisable, + MemoryError, + apsw.Connection, + TESTFILEPREFIX + "testdb", + vfs="faultvfs") + + ## xDlError + db = apsw.Connection(":memory:", vfs="faultvfs") + if hasattr(db, 'enableloadextension'): + db.enableloadextension(True) + ## xDlErrorAllocFail + apsw.faultdict["xDlErrorAllocFail"] = True + self.assertRaises(apsw.ExtensionLoadingError, self.assertRaisesUnraisable, MemoryError, db.loadextension, + "non-existent-file-name") + ## xDlErrorUnicodeFail + apsw.faultdict["xDlErrorUnicodeFail"] = True + self.assertRaises(apsw.ExtensionLoadingError, self.assertRaisesUnraisable, MemoryError, db.loadextension, + "non-existent-file-name") + del db + gc.collect() + ## xRandomnessAllocFail + # we need to be default vfs + vfs2 = FaultVFS("faultvfs2", apsw.vfsnames()[0], makedefault=True) + apsw.randomness(0) + apsw.faultdict["xRandomnessAllocFail"] = True + # doesn't matter which vfs opens the file + self.assertRaisesUnraisable(MemoryError, + apsw.Connection(":memory:").cursor().execute, "select randomblob(10)") + del vfs2 + gc.collect() + + ## xCurrentTimeFail + apsw.faultdict["xCurrentTimeFail"] = True + self.assertRaisesUnraisable(apsw.SQLError, + apsw.Connection(":memory:", vfs="faultvfs").cursor().execute, "select date('now')") + + ## APSWVFSDeallocFail + apsw.faultdict["APSWVFSDeallocFail"] = True + + def foo(): + vfs2 = FaultVFS("faultvfs2", "faultvfs") + del vfs2 + gc.collect() + + self.assertRaisesUnraisable(apsw.IOError, foo) + + ## APSWVFSBadVersion + apsw.faultdict["APSWVFSBadVersion"] = True + self.assertRaises(ValueError, apsw.VFS, "foo", "") + self.assertTrue("foo" not in apsw.vfsnames()) + + ## APSWVFSRegistrationFails + apsw.faultdict["APSWVFSRegistrationFails"] = True + self.assertRaises(apsw.NoMemError, apsw.VFS, "foo", "") + self.assertTrue("foo" not in apsw.vfsnames()) + + ## xReadReadBufferFail + try: + # This will fail if we are using auto-WAL so we don't run + # the rest of the test in WAL mode. 
+ apsw.Connection(TESTFILEPREFIX + "testdb", vfs="faultvfs").cursor().execute("create table dummy1(x,y)") + openok = True + except apsw.CantOpenError: + if len(apsw.connection_hooks) == 0: + raise + openok = False + + # The following tests cause failures when making the + # connection because a connection hook turns on wal mode which + # causes database reads which then cause failures + if openok: + apsw.faultdict["xReadReadBufferFail"] = True + + def foo(): + apsw.Connection(TESTFILEPREFIX + "testdb", vfs="faultvfs").cursor().execute("select * from dummy1") + + self.assertRaises(apsw.SQLError, self.assertRaisesUnraisable, TypeError, foo) + + ## xUnlockFails + apsw.faultdict["xUnlockFails"] = True + # Used to wrap in self.assertRaises(apsw.IOError, ...) but SQLite no longer passes on the error. + # See https://sqlite.org/cvstrac/tktview?tn=3946 + self.assertRaisesUnraisable(apsw.IOError, + apsw.Connection(TESTFILEPREFIX + "testdb", vfs="faultvfs").cursor().execute, + "select * from dummy1") + + ## xSyncFails + apsw.faultdict["xSyncFails"] = True + self.assertRaises(apsw.IOError, self.assertRaisesUnraisable, apsw.IOError, + apsw.Connection(TESTFILEPREFIX + "testdb", vfs="faultvfs").cursor().execute, + "insert into dummy1 values(3,4)") + + ## xFileSizeFails + apsw.faultdict["xFileSizeFails"] = True + self.assertRaises(apsw.IOError, self.assertRaisesUnraisable, apsw.IOError, + apsw.Connection(TESTFILEPREFIX + "testdb", vfs="faultvfs").cursor().execute, + "select * from dummy1") + + ## xCheckReservedLockFails + apsw.faultdict["xCheckReservedLockFails"] = True + self.assertRaises(apsw.IOError, self.assertRaisesUnraisable, apsw.IOError, vfstestdb, vfsname="faultvfs") + + ## xCheckReservedLockIsTrue + apsw.faultdict["xCheckReservedLockIsTrue"] = True + vfstestdb(vfsname="faultvfs") + + ## xCloseFails + t = apsw.VFSFile("", os.path.abspath(TESTFILEPREFIX + "testfile"), + [apsw.SQLITE_OPEN_MAIN_DB | apsw.SQLITE_OPEN_CREATE | apsw.SQLITE_OPEN_READWRITE, 0]) + apsw.faultdict["xCloseFails"] = True + self.assertRaises(apsw.IOError, t.xClose) + del t + + # now catch it in the destructor + def foo(): + t = apsw.VFSFile("", os.path.abspath(TESTFILEPREFIX + "testfile"), + [apsw.SQLITE_OPEN_MAIN_DB | apsw.SQLITE_OPEN_CREATE | apsw.SQLITE_OPEN_READWRITE, 0]) + apsw.faultdict["xCloseFails"] = True + del t + gc.collect() + + self.assertRaisesUnraisable(apsw.IOError, foo) + + ## vfsnamesfails + apsw.faultdict["vfsnamesfails"] = True + self.assertRaises(MemoryError, apsw.vfsnames) + apsw.faultdict["vfsnamesallocfail"] = True + try: + apsw.vfsnames() + 1 / 0 + except MemoryError: + pass + apsw.faultdict["vfsnamesappendfails"] = True + self.assertRaises(MemoryError, apsw.vfsnames) + + ## StatementCacheAllocFails + apsw.faultdict["StatementCacheAllocFails"] = True + try: + apsw.Connection(":memory:") + 1 / 0 + except MemoryError: + pass + + ## OverloadFails + apsw.faultdict["OverloadFails"] = True + try: + db = apsw.Connection(":memory:") + db.overloadfunction("foo", 1) + 1 / 0 + except apsw.NoMemError: + pass + + ## ConnectionEnterExecFailed + apsw.faultdict["ConnectionEnterExecFailed"] = True + try: + db = apsw.Connection(":memory:") + db.__enter__() + 1 / 0 + except apsw.NoMemError: + pass + + ## BackupInitFails + apsw.faultdict["BackupInitFails"] = True + try: + db = apsw.Connection(":memory:") + db.backup("main", apsw.Connection(":memory:"), "main") + 1 / 0 + except apsw.NoMemError: + pass + + ## BackupNewFails + apsw.faultdict["BackupNewFails"] = True + try: + db = apsw.Connection(":memory:") + 
db.backup("main", apsw.Connection(":memory:"), "main") + 1 / 0 + except MemoryError: + pass + + ## BackupTupleFails + apsw.faultdict["BackupTupleFails"] = True + try: + db = apsw.Connection(":memory:") + # add dependent + cur = db.cursor() + cur.execute("select 3; select 4") + db.backup("main", apsw.Connection(":memory:"), "main") + 1 / 0 + except MemoryError: + pass + + ## BackupDependent + for i in range(1, 5): + apsw.faultdict["BackupDependent" + str(i)] = True + try: + db = apsw.Connection(":memory:") + self.assertMayRaiseUnraisable(ValueError, db.backup, "main", apsw.Connection(":memory:"), "main") + 1 / 0 + except MemoryError: + pass + + ### statement cache + db = apsw.Connection("", statementcachesize=1000000) + apsw.faultdict["SCAllocFails"] = True + # we have to overflow the recycle bin + inuse = [] + for n in range(4096): + try: + inuse.append(db.cursor().execute("select ?", (3, ))) + except apsw.NoMemError: + break + else: + self.fail("Expected memoryerror") + del inuse + apsw.faultdict["SCClearBindingsFails"] = True + self.assertRaises(apsw.NoMemError, db.cursor().execute, "select ?", (4, )) + + ### blobs + self.db.cursor().execute("create table blobs(x); insert into blobs values (zeroblob(33))") + rowid = self.db.last_insert_rowid() + apsw.faultdict["BlobReadIntoPyError"] = True + blob = self.db.blobopen("main", "blobs", "x", rowid, writeable=True) + self.assertRaises(MemoryError, blob.readinto, bytearray(33)) + apsw.faultdict["BlobWritePyError"] = True + self.assertRaises(MemoryError, blob.write, b"123") + + ### apsw.format_sql_value + apsw.faultdict["formatsqlHexStrFail"] = True + self.assertRaises(MemoryError, apsw.format_sql_value, b"aabbcc") + apsw.faultdict["formatsqlHexBufFail"] = True + self.assertRaises(MemoryError, apsw.format_sql_value, b"aabbcc") + apsw.faultdict["formatsqlStrFail"] = True + self.assertRaises(MemoryError, apsw.format_sql_value, "aabbcc") + + ## WalAutocheckpointFails + apsw.faultdict["WalAutocheckpointFails"] = True + try: + apsw.Connection(":memory:").wal_autocheckpoint(77) + 1 / 0 + except apsw.IOError: + pass + + ## WalCheckpointFails + apsw.faultdict["WalCheckpointFails"] = True + try: + apsw.Connection(":memory:").wal_checkpoint() + 1 / 0 + except apsw.IOError: + pass + + ## SCPHConfigFails + apsw.faultdict["SCPHConfigFails"] = True + try: + apsw.config(apsw.SQLITE_CONFIG_PCACHE_HDRSZ) + 1 / 0 + except apsw.FullError: + pass + + # Connection.db_names + apsw.faultdict["dbnamesnolist"] = True + self.assertRaises(MemoryError, self.db.db_names) + apsw.faultdict["dbnamestrfail"] = True + self.assertRaises(MemoryError, self.db.db_names) + apsw.faultdict["dbnamesappendfail"] = True + self.assertRaises(MemoryError, self.db.db_names) + + + def testExtDataClassRowFactory(self) -> None: + "apsw.ext.DataClassRowFactory" + import apsw.ext + dcrf = apsw.ext.DataClassRowFactory() + self.db.setrowtrace(dcrf) + # sanity check + for row in self.db.execute("select 3 as three, 'four' as four"): + self.assertEqual(row.three, 3) + self.assertEqual(row.four, 'four') + row.four = "five" # not frozen + # rename check + for row in self.db.execute("select 3 as three, 'four' as [4]"): + self.assertEqual(row.three, 3) + self.assertEqual(row._1, 'four') + # no rename, kwargs + dcrf2 = apsw.ext.DataClassRowFactory(rename=False, dataclass_kwargs={"frozen": True}) + self.db.setrowtrace(dcrf2) + self.assertRaises(TypeError, self.db.execute("select 4 as [4]").fetchall) + for row in self.db.execute("select 3 as three"): + try: + import dataclasses + row.three = 4 + except 
dataclasses.FrozenInstanceError: + pass + db = apsw.Connection("") + db.setrowtrace(dcrf) + for row in db.execute( + "create table foo([x y] some random typename here); insert into foo values(3); select * from foo"): + self.assertEqual(row.__description__, (('x y', 'some random typename here'), )) + # type annotations + self.db.setrowtrace(dcrf) + self.db.execute( + "create table foo(one [], two [an integer], three VARCHAR(17), four cblob, five doUBl, six [none of those]); insert into foo values(1,2,3,4,5,6)" + ) + self.assertEqual(dcrf.get_type("an integer"), int) + for row in self.db.execute("select * from foo"): + a = row.__annotations__ + self.assertEqual(a["one"], typing.Any) + self.assertEqual(a["two"], int) + self.assertEqual(a["three"], str) + self.assertEqual(a["four"], bytes) + self.assertEqual(a["five"], float) + self.assertEqual(a["six"], typing.Union[float, int]) + + def testExtTypesConverter(self) -> None: + "apsw.ext.TypesConverterCursorFactory" + import apsw.ext + + tccf = apsw.ext.TypesConverterCursorFactory() + + class Point(apsw.ext.SQLiteTypeAdapter): + + def to_sqlite_value(self): + return 3 + + tccf.register_adapter(complex, lambda c: f"{ c.real };{ c.imag }") + tccf.register_converter("COMPLEX", lambda v: complex(*(float(part) for part in v.split(";")))) + self.db.cursor_factory = tccf + self.db.execute("create table foo(a POINT, b COMPLEX)") + self.db.execute("insert into foo values(?,?);", (Point(), 3 + 4j)) + self.db.execute(" insert into foo values(:one, :two)", {"one": Point(), "two": 3 + 4j}) + + def datas(): + for _ in range(10): + yield (Point(), 3 + 4j) + + self.db.executemany("insert into foo values(?,?)", datas()) + for row in self.db.execute("select * from foo"): + self.assertEqual(row[0], 3) + self.assertEqual(row[1], 3 + 4j) + + self.assertRaises(TypeError, tccf.adapt_value, {}) + self.assertEqual(tccf.convert_value("zebra", "zebra"), "zebra") + + def builtin_types(): + yield (None, ) + yield (3, ) + yield (b"aabbccddee", ) + yield ("hello world", ) + yield (3.1415, ) + + self.assertEqual(self.db.executemany("select ?", builtin_types()).fetchall(), list(builtin_types())) + + class NotImplemented(apsw.ext.SQLiteTypeAdapter): + pass + + self.assertRaises(TypeError, NotImplemented) + + def testExtQueryInfo(self) -> None: + "apsw.ext.query_info" + import apsw.ext + + qd = apsw.ext.query_info(self.db, "select 3; a syntax error") + self.assertEqual(qd.query, "select 3; a syntax error") + self.assertEqual(qd.bindings, None) + self.assertEqual(qd.first_query, "select 3; ") + self.assertEqual(qd.query_remaining, "a syntax error") + self.assertEqual(qd.is_explain, 0) + self.assertEqual(qd.is_readonly, True) + self.assertEqual(qd.description, (('3', None), )) + + self.assertEqual(1, apsw.ext.query_info(self.db, "explain select 3").is_explain) + self.assertEqual(2, apsw.ext.query_info(self.db, "explain query plan select 3").is_explain) + + self.db.execute( + "create table one(x up); create table two(x down); insert into one values(3); insert into two values(3)") + self.assertFalse(apsw.ext.query_info(self.db, "insert into two values(7)").is_readonly) + + # actions + query = "select * from one join two" + self.assertIsNone(apsw.ext.query_info(self.db, query).actions) + qd = apsw.ext.query_info(self.db, query, actions=True) + self.assertTrue( + any(a.action_name == "SQLITE_READ" and a.table_name == "one" for a in qd.actions) + and any(a.action_name == "SQLITE_READ" and a.table_name == "two" for a in qd.actions)) + + # expanded_sql + self.assertEqual("select 3, 
'three'", + apsw.ext.query_info(self.db, "select ?, ?", (3, "three"), expanded_sql=True).expanded_sql) + + # explain / explain query_plan + # from https://sqlite.org/lang_with.html + query = """ +WITH RECURSIVE + xaxis(x) AS (VALUES(-2.0) UNION ALL SELECT x+0.05 FROM xaxis WHERE x<1.2), + yaxis(y) AS (VALUES(-1.0) UNION ALL SELECT y+0.1 FROM yaxis WHERE y<1.0), + m(iter, cx, cy, x, y) AS ( + SELECT 0, x, y, 0.0, 0.0 FROM xaxis, yaxis + UNION ALL + SELECT iter+1, cx, cy, x*x-y*y + cx, 2.0*x*y + cy FROM m + WHERE (x*x + y*y) < 4.0 AND iter<28 + ), + m2(iter, cx, cy) AS ( + SELECT max(iter), cx, cy FROM m GROUP BY cx, cy + ), + a(t) AS ( + SELECT group_concat( substr(' .+*#', 1+min(iter/7,4), 1), '') + FROM m2 GROUP BY cy + ) +SELECT group_concat(rtrim(t),x'0a') FROM a; + """ + self.assertIsNone(apsw.ext.query_info(self.db, query).explain) + qd = apsw.ext.query_info(self.db, query, explain=True) + self.assertTrue(all(isinstance(e, apsw.ext.VDBEInstruction) for e in qd.explain)) + # at time of writing it was 233 steps, so use ~10% of that + self.assertGreater(len(qd.explain), 25) + self.assertIsNone(apsw.ext.query_info(self.db, query).query_plan) + qd = apsw.ext.query_info(self.db, query, explain_query_plan=True) + + def check_instance(node: apsw.ext.QueryPlan): + return isinstance(node, apsw.ext.QueryPlan) and all(check_instance(s) for s in (node.sub or [])) + + self.assertTrue(check_instance(qd.query_plan)) + + def count(node: apsw.ext.QueryPlan): + return 1 + sum(count(s) for s in (node.sub or [])) + + # at time of writing it was 24 nodes + self.assertGreater(count(qd.query_plan), 10) + + # This test is run last by deliberate name choice. If it did + # uncover any bugs there isn't much that can be done to turn the + # checker off. + def testzzForkChecker(self): + "Test detection of using objects across fork" + # need to free up everything that already exists + self.db.close() + self.db = None + gc.collect() + # install it + apsw.fork_checker() + + # return some objects + def getstuff(): + db = apsw.Connection(":memory:") + cur = db.cursor() + for row in cur.execute( + "create table foo(x);insert into foo values(1);insert into foo values(x'aabbcc'); select last_insert_rowid()" + ): + blobid = row[0] + blob = db.blobopen("main", "foo", "x", blobid, 0) + db2 = apsw.Connection(":memory:") + if hasattr(db2, "backup"): + backup = db2.backup("main", db, "main") + else: + backup = None + return (db, cur, blob, backup) + + # test the objects + def teststuff(db, cur, blob, backup): + if db: + db.cursor().execute("select 3") + if cur: + cur.execute("select 3") + if blob: + blob.read(1) + if backup: + backup.step() + + # Sanity check + teststuff(*getstuff()) + # get some to use in parent + parent = getstuff() + # to be used (and fail with error) in child + child = getstuff() + + def childtest(*args): + # we can't use unittest methods here since we are in a different process + val = args[0] + args = args[1:] + # this should work + teststuff(*getstuff()) + + # ignore the unraiseable stuff sent to sys.excepthook + def eh(*args): + pass + + sys.excepthook = eh + # call with each separate item to check + try: + for i in range(len(args)): + a = [None] * len(args) + a[i] = args[i] + try: + teststuff(*a) + except apsw.ForkingViolationError: + pass + except apsw.ForkingViolationError: + # we get one final exception "between" line due to the + # nature of how the exception is raised + pass + # this should work again + teststuff(*getstuff()) + val.value = 1 + + import multiprocessing + val = 
multiprocessing.Value("i", 0) + p = multiprocessing.Process(target=childtest, args=[val] + list(child)) + p.start() + p.join() + self.assertEqual(1, val.value) # did child complete ok? + teststuff(*parent) + + # we call shutdown to free mutexes used in fork checker, + # so clear out all the things first + del child + del parent + gc.collect() + apsw.shutdown() + + +testtimeout = False # timeout testing adds several seconds to each run + + +def vfstestdb(filename=TESTFILEPREFIX + "testdb2", vfsname="apswtest", closedb=True, mode=None, attachdb=None): + "This method causes all parts of a vfs to be executed" + gc.collect() # free any existing db handles + for suf in "", "-journal", "x", "x-journal": + deletefile(filename + suf) + + db = apsw.Connection("file:" + filename + "?psow=0", vfs=vfsname, flags=openflags) + if mode: + db.cursor().execute("pragma journal_mode=" + mode) + db.cursor().execute( + "create table foo(x,y); insert into foo values(1,2); insert into foo values(date('now'), date('now'))") + if testtimeout: + # busy + db2 = apsw.Connection(filename, vfs=vfsname) + if mode: + db2.cursor().execute("pragma journal_mode=" + mode) + db.setbusytimeout(1100) + db2.cursor().execute("begin exclusive") + try: + db.cursor().execute("begin immediate") + 1 / 0 # should not be reached + except apsw.BusyError: + pass + db2.cursor().execute("end") + + # cause truncate to be called + # see sqlite test/pager3.test where this (public domain) code is taken from + # I had to add the pragma journal_mode to get it to work + c = db.cursor() + for row in c.execute("pragma journal_mode=truncate"): + pass + + c.execute(""" + create table t1(a unique, b); + insert into t1 values(1, 'abcdefghijklmnopqrstuvwxyz'); + insert into t1 values(2, 'abcdefghijklmnopqrstuvwxyz'); + update t1 set b=b||a||b; + update t1 set b=b||a||b; + update t1 set b=b||a||b; + update t1 set b=b||a||b; + update t1 set b=b||a||b; + update t1 set b=b||a||b; + create temp table t2 as select * from t1; + begin; + create table t3(x);""") + try: + c.execute("insert into t1 select 4-a, b from t2") + except apsw.ConstraintError: + pass + c.execute("rollback") + + if attachdb: + c.execute("attach '%s' as second" % (attachdb, )) + + if hasattr(APSW, "testLoadExtension"): + # can we use loadextension? + db.enableloadextension(True) + try: + db.loadextension("./" * 128 + LOADEXTENSIONFILENAME + "xxx") + except apsw.ExtensionLoadingError: + pass + db.loadextension(LOADEXTENSIONFILENAME) + assert (1 == next(db.cursor().execute("select half(2)"))[0]) + + # Get the routine xCheckReservedLock to be called. 
We need a hot journal + # which this code adapted from SQLite's pager.test does + if not iswindows: + c.execute("create table abc(a,b,c)") + for i in range(20): + c.execute("insert into abc values(1,2,?)", (randomstring(200), )) + c.execute("begin; update abc set c=?", (randomstring(200), )) + + write_whole_file(filename + "x", "wb", read_whole_file(filename, "rb")) + write_whole_file(filename + "x-journal", "wb", read_whole_file(filename + "-journal", "rb")) + + f = open(filename + "x-journal", "ab") + f.seek(-1032, 2) # 1032 bytes before end of file + f.write(b"\x00\x00\x00\x00") + f.close() + + hotdb = apsw.Connection(filename + "x", vfs=vfsname) + if mode: + hotdb.cursor().execute("pragma journal_mode=" + mode) + hotdb.cursor().execute("select sql from sqlite_master") + hotdb.close() + + if closedb: + db.close() + else: + return db + + +if not iswindows: + # note that a directory must be specified otherwise $LD_LIBRARY_PATH is used + LOADEXTENSIONFILENAME = "./testextension.sqlext" +else: + LOADEXTENSIONFILENAME = "testextension.sqlext" + +MEMLEAKITERATIONS = 1000 +PROFILESTEPS = 250000 + + +def setup(): + """Call this if importing this test suite as it will ensure tests + we can't run are removed etc. It will also print version + information.""" + + print_version_info() + try: + apsw.config(apsw.SQLITE_CONFIG_MEMSTATUS, True) # ensure memory tracking is on + except apsw.MisuseError: + # if using amalgamation then something went wrong + if apsw.using_amalgamation: + raise + # coverage uses sqlite and so the config call is too + # late + pass + apsw.initialize() # manual call for coverage + memdb = apsw.Connection(":memory:") + if not getattr(memdb, "enableloadextension", None): + del APSW.testLoadExtension + + # py 3.6 can't load apsw.ext + if sys.version_info < (3, 7): + for name in list(dir(APSW)): + if name.startswith("testExt"): + delattr(APSW, name) + + forkcheck = False + if hasattr(apsw, "fork_checker") and hasattr(os, "fork") and platform.python_implementation() != "PyPy": + try: + import multiprocessing + if hasattr(multiprocessing, "get_start_method"): + if multiprocessing.get_start_method() != "fork": + raise ImportError + # sometimes the import works but doing anything fails + val = multiprocessing.Value("i", 0) + forkcheck = True + except ImportError: + pass + + # we also remove forkchecker if doing multiple iterations + if not forkcheck or "APSW_TEST_ITERATIONS" in os.environ: + del APSW.testzzForkChecker + + if not is64bit or "APSW_TEST_LARGE" not in os.environ: + del APSW.testLargeObjects + + # We can do extension loading but no extension present ... + if getattr(memdb, "enableloadextension", None) and not os.path.exists(LOADEXTENSIONFILENAME): + print("Not doing LoadExtension test. 
You need to compile the extension first\n") + print(" python3 setup.py build_test_extension") + del APSW.testLoadExtension + + # coverage testing of the shell + if "APSW_PY_COVERAGE" in os.environ: + APSW._originaltestShell = APSW.testShell + APSW.testShell = APSW._testShellWithCoverage + + # python version compatibility + if not hasattr(APSW, "assertRaisesRegex"): + APSW.assertRaisesRegex = APSW.assertRaisesRegexCompat + + del memdb + + +test_types_vals = ( + "a simple string", # "ascii" string + "0123456789" * 200000, # a longer string + u"a \u1234 unicode \ufe54 string \u0089", # simple unicode string + u"\N{BLACK STAR} \N{WHITE STAR} \N{LIGHTNING} \N{COMET} ", # funky unicode or an episode of b5 + u"\N{MUSICAL SYMBOL G CLEF}", # http://www.cmlenz.net/archives/2008/07/the-truth-about-unicode-in-python + 97, # integer + 2147483647, # numbers on 31 bit boundary (32nd bit used for integer sign), and then + -2147483647, # start using 32nd bit (must be represented by 64bit to avoid losing + 2147483648, # detail) + -2147483648, + 2147483999, + -2147483999, + 992147483999, + -992147483999, + 9223372036854775807, + -9223372036854775808, + b"a set of bytes", # bag of bytes initialised from a string, but don't confuse it with a + b"".join([b"\\x%02x" % (x, ) for x in range(256)]), # string + b"".join([b"\\x%02x" % (x, ) for x in range(256)]) * 20000, # non-trivial size + None, # our good friend NULL/None + 1.1, # floating point can't be compared exactly - assertAlmostEqual is used to check + 10.2, # see Appendix B in the Python Tutorial + 1.3, + 1.45897589347E97, + 5.987987 / 8.7678678687676786, + math.pi, + True, # derived from integer + False) + +if __name__ == '__main__': + setup() + + def runtests(): + + def set_wal_mode(c): + # Note that WAL won't be on for memory databases. 
This + # execution returns the active mode + c.execute("PRAGMA journal_mode=WAL").fetchall() + + def fsync_off(c): + try: + c.execute("PRAGMA synchronous=OFF ; PRAGMA fullfsync=OFF; PRAGMA checkpoint_fullfsync=OFF") + except apsw.BusyError: + pass + + b4 = apsw.connection_hooks[:] + try: + if "APSW_TEST_WALMODE" in os.environ: + apsw.connection_hooks.append(set_wal_mode) + print("WAL mode testing") + + if "APSW_TEST_FSYNC_OFF" in os.environ: + apsw.connection_hooks.append(fsync_off) + + if os.getenv("PYTRACE"): + import trace + t = trace.Trace(count=0, trace=1, ignoredirs=[sys.prefix, sys.exec_prefix]) + t.runfunc(unittest.main) + else: + unittest.main() + finally: + apsw.connection_hooks = b4 + + v = os.environ.get("APSW_TEST_ITERATIONS", None) + if v is None: + try: + runtests() + except SystemExit: + exitcode = sys.exc_info()[1].code + else: + # we run all the tests multiple times which has better coverage + # a larger value for MEMLEAKITERATIONS slows down everything else + MEMLEAKITERATIONS = 5 + PROFILESTEPS = 1000 + v = int(v) + for i in range(v): + print(f"Iteration { i + 1 } of { v }") + try: + runtests() + except SystemExit: + exitcode = sys.exc_info()[1].code + + # Free up everything possible + del APSW + del ThreadRunner + del randomintegers + + # clean up sqlite and apsw + gc.collect() # all cursors & connections must be gone + apsw.shutdown() + apsw.config(apsw.SQLITE_CONFIG_LOG, None) + if hasattr(apsw, "_fini"): + apsw._fini() + gc.collect() + del apsw + + exit = sys.exit + + # modules + del unittest + del os + del math + del random + del time + del threading + del queue + del traceback + del re + gc.collect() + + exit(exitcode) \ No newline at end of file diff -Nru python-apsw-3.39.2.0/apsw/trace.py python-apsw-3.40.0.0/apsw/trace.py --- python-apsw-3.39.2.0/apsw/trace.py 1970-01-01 00:00:00.000000000 +0000 +++ python-apsw-3.40.0.0/apsw/trace.py 2022-10-26 14:11:04.000000000 +0000 @@ -0,0 +1,332 @@ +#!/usr/bin/env python3 +# +# See the accompanying LICENSE file. +# +# This module lets you automatically trace SQL operations in a program +# using APSW without having to modify the program in any way. 
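+#
+# A sketch of the usual invocation (yourscript.py is a placeholder for
+# your own program; --help should list the available options):
+#
+#   python3 -m apsw.trace yourscript.py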
+ +import time +import sys +import weakref + +class APSWTracer(object): + + def __init__(self, options): + self.u="" + import _thread + self.threadid=_thread.get_ident + self.stringtypes=(str,) + self.numtypes=(int, float) + self.binarytypes=(bytes,) + self.options=options + if options.output in ("-", "stdout"): + self._writer=sys.stdout.write + elif options.output=="stderr": + self._writer=sys.stderr.write + else: + self._writer=open(options.output, "wt").write + + try: + import apsw + apsw.connection_hooks.append(self.connection_hook) + except: + sys.stderr.write(self.u+"Unable to import apsw\n") + raise + + self.mapping_open_flags=apsw.mapping_open_flags + self.zeroblob=apsw.zeroblob + self.apswConnection=apsw.Connection + + self.newcursor={} + self.threadsused={} # really want a set + self.queries={} + self.timings={} + self.rowsreturned=0 + self.numcursors=0 + self.numconnections=0 + self.timestart=time.time() + + def writerpy3(self, s): + self._writer(s+"\n") + + writer=writerpy3 + + def format(self, obj): + if isinstance(obj, dict): + return self.formatdict(obj) + if isinstance(obj, tuple): + return self.formatseq(obj, '()') + if isinstance(obj, list): + return self.formatseq(obj, '[]') + if isinstance(obj, self.stringtypes): + return self.formatstring(obj) + if obj is True: + return "True" + if obj is False: + return "False" + if obj is None: + return "None" + if isinstance(obj, self.numtypes): + return repr(obj) + if isinstance(obj, self.binarytypes): + return self.formatbinary(obj) + if isinstance(obj, self.zeroblob): + return "zeroblob(%d)" % (obj.length(),) + return repr(obj) + + def formatstring(self, obj, quote='"', checkmaxlen=True): + obj=obj.replace("\n", "\\n").replace("\r", "\\r") + if checkmaxlen and len(obj)>self.options.length: + obj=obj[:self.options.length]+'..' + return self.u+quote+obj+quote + + def formatdict(self, obj): + items=list(obj.items()) + items.sort() + op=[] + for k,v in items: + op.append(self.format(k)+": "+self.format(v)) + return self.u+"{"+", ".join(op)+"}" + + def formatseq(self, obj, paren): + return self.u+paren[0]+", ".join([self.format(v) for v in obj])+paren[1] + + def formatbinary(self, obj): + if len(obj)`__ +embedded relational database engine. It focuses translating between +the complete `SQLite C API `__ +and `Python's C API `__, +letting you get the most out of SQLite from Python. + +It is recommended to use the builtin `sqlite3 module +`__ if you want SQLite +to be interchangeable with the other database drivers. + +Use APSW when you want to use SQLite fully, and have an improved +developer experience. The `documentation +`__ has a section on +the differences between APSW and sqlite3. + +Help/Documentation +================== + +The latest documentation is at https://rogerbinns.github.io/apsw/ + +Mailing lists/contacts +====================== + +* `Python SQLite discussion group `__ + (preferred) +* You can also email the author at `rogerb@rogerbinns.com + `__ + +Releases and Changes +==================== + +Releases are made to `PyPI `__ +(install using pip) and `Github +`__ + +New release announcements are sent to the `Python SQLite discussion +group `__ and there is +an `RSS feed from PyPI +`__. + +`Full detailed list of changes `__ + +Bugs +==== + +You can find existing and fixed bugs by clicking on `Issues +`__ and using "New Issue" +to report previously unknown issues. + +License +======= + +See `LICENSE +`__ - in +essence any OSI approved open source license. 
+ +Older Python versions +===================== + +A `release +`__ +from January 2022 supports all CPython versions back to 2.3. The +`tips `__ include more +information about versions. + + diff -Nru python-apsw-3.39.2.0/apsw.egg-info/SOURCES.txt python-apsw-3.40.0.0/apsw.egg-info/SOURCES.txt --- python-apsw-3.39.2.0/apsw.egg-info/SOURCES.txt 1970-01-01 00:00:00.000000000 +0000 +++ python-apsw-3.40.0.0/apsw.egg-info/SOURCES.txt 2022-11-27 14:16:58.000000000 +0000 @@ -0,0 +1,35 @@ +LICENSE +MANIFEST.in +README.rst +checksums +setup.cfg +setup.py +apsw/__init__.pyi +apsw/__main__.py +apsw/ext.py +apsw/py.typed +apsw/shell.py +apsw/speedtest.py +apsw/tests.py +apsw/trace.py +apsw.egg-info/PKG-INFO +apsw.egg-info/SOURCES.txt +apsw.egg-info/dependency_links.txt +apsw.egg-info/top_level.txt +src/apsw.c +src/apsw.docstrings +src/apswversion.h +src/argparse.c +src/backup.c +src/blob.c +src/connection.c +src/cursor.c +src/exceptions.c +src/pyutil.c +src/statementcache.c +src/testextension.c +src/traceback.c +src/types.py +src/util.c +src/vfs.c +src/vtable.c \ No newline at end of file diff -Nru python-apsw-3.39.2.0/apsw.egg-info/top_level.txt python-apsw-3.40.0.0/apsw.egg-info/top_level.txt --- python-apsw-3.39.2.0/apsw.egg-info/top_level.txt 1970-01-01 00:00:00.000000000 +0000 +++ python-apsw-3.40.0.0/apsw.egg-info/top_level.txt 2022-11-27 14:16:58.000000000 +0000 @@ -0,0 +1 @@ +apsw diff -Nru python-apsw-3.39.2.0/checksums python-apsw-3.40.0.0/checksums --- python-apsw-3.39.2.0/checksums 2022-07-25 07:17:12.000000000 +0000 +++ python-apsw-3.40.0.0/checksums 2022-11-25 06:07:44.000000000 +0000 @@ -24,3 +24,7 @@ https://sqlite.org/2022/sqlite-autoconf-3390000.tar.gz 3064015 a839c65452f7ea66e9ba9efa52b47ba1cd84225c bbe56bf9d822d78c942a065bf041ffd5 https://sqlite.org/2022/sqlite-autoconf-3390200.tar.gz 3064438 fe360190393296b956c5db2a448ae2b5692d0377 f00711818d0afc18f4b1b3b7207176f4 https://sqlite.org/2022/sqlite-autoconf-3390100.tar.gz 3064607 251b8f9838ae6c44df1eb8bcc3600e14fa0597c5 cc9d19c9e3ae6dee02eb4f147e0dd500 +https://sqlite.org/2022/sqlite-autoconf-3390300.tar.gz 3064970 ded6323be5fb10fddd7c3053a80c6d38e1356256 b77730d5c2f8c85b223d1959d08b6514 +https://sqlite.org/2022/sqlite-autoconf-3390400.tar.gz 3065214 c4c5c39269d1b9bb1487cff580c1f583608229b2 44b7e6691b0954086f717a6c43b622a5 + +https://sqlite.org/2022/sqlite-autoconf-3400000.tar.gz 3097756 2aa5df983dc2d7e6b096b49e579bcc1e7b80667e c833d61da768a116fa16d910f43cfd9a diff -Nru python-apsw-3.39.2.0/debian/changelog python-apsw-3.40.0.0/debian/changelog --- python-apsw-3.39.2.0/debian/changelog 2022-11-02 10:44:49.000000000 +0000 +++ python-apsw-3.40.0.0/debian/changelog 2022-12-23 18:19:32.000000000 +0000 @@ -1,8 +1,15 @@ -python-apsw (3.39.2.0-1build1) lunar; urgency=medium +python-apsw (3.40.0.0-2) unstable; urgency=medium - * No-change rebuild with Python 3.11 as supported + * Update Standards-Version to 4.6.2 with no changes + * Add build dependency on python3-setuptools (closes: #1026582) - -- Graham Inggs Wed, 02 Nov 2022 10:44:49 +0000 + -- Joel Rosdahl Fri, 23 Dec 2022 19:19:32 +0100 + +python-apsw (3.40.0.0-1) unstable; urgency=medium + + * New upstream release 3.40.0.0 + + -- Joel Rosdahl Mon, 28 Nov 2022 21:47:25 +0100 python-apsw (3.39.2.0-1) unstable; urgency=medium diff -Nru python-apsw-3.39.2.0/debian/control python-apsw-3.40.0.0/debian/control --- python-apsw-3.39.2.0/debian/control 2022-08-03 10:58:29.000000000 +0000 +++ python-apsw-3.40.0.0/debian/control 2022-12-23 18:19:32.000000000 +0000 @@ -6,8 +6,9 @@ debhelper-compat (= 
12),
     dh-python,
     python3-all-dev,
+    python3-setuptools,
     libsqlite3-dev (>= 3.32.0)
-Standards-Version: 4.6.1
+Standards-Version: 4.6.2
 X-Python-Version: all

 Package: python3-apsw
diff -Nru python-apsw-3.39.2.0/doc/apsw.html python-apsw-3.40.0.0/doc/apsw.html
--- python-apsw-3.39.2.0/doc/apsw.html 2022-07-31 06:50:52.000000000 +0000
+++ python-apsw-3.40.0.0/doc/apsw.html 2022-11-27 14:16:56.000000000 +0000
@@ -1,19 +1,21 @@
 [HTML head boilerplate; only the page title changes]
-APSW Module — APSW 3.39.2.0 documentation
+APSW Module — APSW 3.40.0.0 documentation
@@ -43,7 +45,7 @@
 [navigation bar; only the version link changes]
@@ -54,116 +56,161 @@
 APSW Module

 The module is the main interface to SQLite. Methods and data on the
 module have process wide effects.

 Type Annotations

 Comprehensive type annotations are included, and your code using apsw
 can be checked using tools like mypy. You can refer to the types below for
-your annotations (eg as apsw.SQLiteValue)
    
    -.. class:: SQLiteValue
    -
    -    | `Union <https://docs.python.org/3/library/typing.html#typing.Union>`__ [None, int, float, bytes, str]
    -
    -    SQLite supports 5 types - None (NULL), 64 bit signed int, 64 bit
    -    float, bytes, and unicode text
    -
    -
    -.. class:: SQLiteValues
    -
    -    | `Union <https://docs.python.org/3/library/typing.html#typing.Union>`__ [`Tuple <https://docs.python.org/3/library/typing.html#typing.Tuple>`__ [()], `Tuple <https://docs.python.org/3/library/typing.html#typing.Tuple>`__ [:class:`SQLiteValue`, ...]]
    -
    -    A sequence of zero or more :class:`SQLiteValue`
    -
    -
    -.. class:: Bindings
    -
    -    | `Union <https://docs.python.org/3/library/typing.html#typing.Union>`__ [`Sequence <https://docs.python.org/3/library/typing.html#typing.Sequence>`__ [`Union <https://docs.python.org/3/library/typing.html#typing.Union>`__ [:class:`SQLiteValue`, :class:`zeroblob`]], `Dict <https://docs.python.org/3/library/typing.html#typing.Dict>`__ [str, `Union <https://docs.python.org/3/library/typing.html#typing.Union>`__ [:class:`SQLiteValue`, :class:`zeroblob`]]]
    -
    -    Query bindings are either a sequence of :class:`SQLiteValue`, or a dict mapping names
    -    to :class:`SQLiteValues`.  You can also provide :class:`zeroblob` in :class:`Bindings`.
    -
    -
    -.. class:: AggregateT
    -
    -    | `Any <https://docs.python.org/3/library/typing.html#typing.Any>`__ 
    -
    -    An object provided as first parameter of step and final aggregate functions
    -
    -
    -.. class:: AggregateStep
    -
    -    | `Union <https://docs.python.org/3/library/typing.html#typing.Union>`__ [
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`AggregateT`], None],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`AggregateT`, :class:`SQLiteValue`], None],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`AggregateT`, :class:`SQLiteValue`, :class:`SQLiteValue`], None],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`AggregateT`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`], None],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`AggregateT`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`], None],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`AggregateT`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`], None],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`AggregateT`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`], None],
    -    | ]
    -
    -    :class:`AggregateStep` is called on each matching row with the relevant number of :class:`SQLiteValue`
    -
    -
    -.. class:: AggregateFinal
    -
    -    | `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`AggregateT`], :class:`SQLiteValue`]
    -
    -    Final is called after all matching rows have been processed by step, and returns a :class:`SQLiteValue`
    -
    -
    -.. class:: AggregateFactory
    -
    -    | `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[], `Tuple <https://docs.python.org/3/library/typing.html#typing.Tuple>`__ [:class:`AggregateT`, :class:`AggregateStep`, :class:`AggregateFinal`]]
    -
    -    Called each time for the start of a new calculation using an aggregate function,
    -    returning an object, a step function and a final function
    -
    -
    -.. class:: ScalarProtocol
    -
    -    | `Union <https://docs.python.org/3/library/typing.html#typing.Union>`__ [
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[], :class:`SQLiteValue`],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`SQLiteValue`], :class:`SQLiteValue`],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`SQLiteValue`, :class:`SQLiteValue`], :class:`SQLiteValue`],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`], :class:`SQLiteValue`],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`], :class:`SQLiteValue`],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`], :class:`SQLiteValue`],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`], :class:`SQLiteValue`],
    -    |         `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`, :class:`SQLiteValue`], :class:`SQLiteValue`]
    -    | ]
-
-    Scalar callbacks take zero or more :class:`SQLiteValues`, and return a :class:`SQLiteValue`
+your annotations (eg as apsw.SQLiteValue)
+
+class SQLiteValue
+
+    Union [None, int, float, bytes, str]
+
+    SQLite supports 5 types - None (NULL), 64 bit signed int, 64 bit
+    float, bytes, and str (unicode text)
+
+class SQLiteValues
+
+    Union [Tuple [()], Tuple [SQLiteValue, ...]]
+
+    A sequence of zero or more SQLiteValue
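A quick sketch of using these aliases in annotations (not part of the page: the function name and query are invented, and with `from __future__ import annotations` the alias is purely a type-checking aid):

    from __future__ import annotations
    import apsw

    def first_value(db: apsw.Connection, sql: str) -> apsw.SQLiteValue:
        # return the first column of the first row, or None when no rows match
        for row in db.execute(sql):
            return row[0]
        return None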

    +
    +
    +
+class Bindings
+
+    Query bindings are either a sequence of SQLiteValue, or a dict mapping names
+    to SQLiteValues. You can also provide zeroblob in Bindings. You can use
+    dict subclasses or any type registered with collections.abc.Mapping
+    for named bindings
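A minimal sketch of the two binding styles (the table and values are invented):

    import apsw

    db = apsw.Connection(":memory:")
    db.execute("create table point(x, y)")
    # positional bindings - a sequence of SQLiteValue
    db.execute("insert into point values(?, ?)", (1, 2.5))
    # named bindings - a mapping such as a dict
    db.execute("insert into point values(:x, :y)", {"x": 3, "y": None})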

    +
-.. class:: RowTracer
-
-    | `Callable <https://docs.python.org/3/library/typing.html#typing.Callable>`__ [[:class:`Cursor`, :class:`SQLiteValues`], `Any <https://docs.python.org/3/library/typing.html#typing.Any>`__ ]
-
-    Row tracers are called with the :class:`Cursor`, and the row that would
-    be returned. If you return None, then no row is returned, otherwise
-    whatever is returned is returned as a result row for the query
+
+class AggregateT
+
+    Any
    An object provided as first parameter of step and final aggregate functions

class AggregateStep

    AggregateStep is called on each matching row with the relevant number of SQLiteValue

class AggregateFinal

    Final is called after all matching rows have been processed by step, and returns a SQLiteValue

class AggregateFactory

Called each time for the start of a new calculation using an aggregate function, returning an object, a step function and a final function

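As an illustrative sketch (the function name and logic are invented), a factory returns the state object, the step function, and the final function:

import apsw

def longest_factory():
    state = {"value": ""}                 # the AggregateT object

    def step(state, *args):               # AggregateStep
        for v in args:
            if isinstance(v, str) and len(v) > len(state["value"]):
                state["value"] = v

    def final(state):                     # AggregateFinal
        return state["value"]             # a SQLiteValue

    return state, step, final             # what an AggregateFactory returns

con = apsw.Connection(":memory:")
con.createaggregatefunction("longest", longest_factory)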
class ScalarProtocol

    Scalar callbacks take zero or more SQLiteValues, and return a SQLiteValue

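A minimal sketch of registering a scalar callback (the function is invented for illustration):

import apsw

def reverse_text(value):                  # receives SQLiteValue arguments
    return value[::-1]                    # returns a SQLiteValue

con = apsw.Connection(":memory:")
con.createscalarfunction("reverse", reverse_text)
print(con.execute("SELECT reverse('hello')").fetchall())   # [('olleh',)]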
class RowTracer

Row tracers are called with the Cursor, and the row that would be returned. If you return None, then no row is returned, otherwise whatever is returned is returned as a result row for the query

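For example, a sketch of a row tracer that returns each row as a dict keyed by column name:

import apsw

con = apsw.Connection(":memory:")

def row_to_dict(cursor, row):
    # returning None here would suppress the row entirely
    return {d[0]: v for d, v in zip(cursor.getdescription(), row)}

con.rowtrace = row_to_dict
print(con.execute("SELECT 1 AS one, 'x' AS two").fetchall())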
class ExecTracer

Execution tracers are called with the cursor, sql query text, and the bindings used. Return False/None to abort execution, or True to continue

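A sketch of an execution tracer that logs each query before it runs:

import apsw

con = apsw.Connection(":memory:")

def log_queries(cursor, sql, bindings):
    print("executing:", sql, bindings)
    return True        # False/None would abort the execution

con.exectrace = log_queries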
class Authorizer

Authorizers are called with an operation code and 4 strings (which could be None) depending on the operation. Return SQLITE_OK, SQLITE_DENY, or SQLITE_IGNORE

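A sketch using Connection.setauthorizer with an invented policy that denies deletes:

import apsw

con = apsw.Connection(":memory:")

def deny_deletes(op, p1, p2, dbname, trigger_or_view):
    # op is a code such as apsw.SQLITE_DELETE; the four strings vary by operation
    if op == apsw.SQLITE_DELETE:
        return apsw.SQLITE_DENY
    return apsw.SQLITE_OK

con.setauthorizer(deny_deletes)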
class CommitHook

Commit hook is called with no arguments and should return True to abort the commit and False to let it continue

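A sketch with Connection.setcommithook - note the inverted sense compared to other callbacks:

import apsw

con = apsw.Connection(":memory:")

def commithook():
    return False       # False lets the commit continue, True aborts it

con.setcommithook(commithook)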
API Reference

SQLITE_VERSION_NUMBER: int

The integer version number of SQLite that APSW was compiled against. For example SQLite 3.6.4 will have the value 3006004. This number may be different than the actual library in use if a shared SQLite library is loaded at runtime.

apswversion() str

    Returns the APSW version.

compile_options: Tuple[str, ...]

    A tuple of the options used to compile SQLite. For example it will be something like this:

    ('ENABLE_LOCKING_STYLE=0', 'TEMP_STORE=1', 'THREADSAFE=1')
     
     
complete(statement: str) bool

Returns True if the input string comprises one or more complete SQL statements by looking for an unquoted trailing semi-colon.

An example use would be if you were prompting the user for SQL statements, and needed to know whether you had a complete statement or should ask for another line.
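For example:

import apsw

apsw.complete("SELECT 1")     # False - no unquoted trailing semi-colon yet
apsw.complete("SELECT 1 ;")   # True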

config(op: int, *args: Any) None

Parameters

connection_hooks: List[Callable[[Connection], None]]

The purpose of the hooks is to allow the easy registration of functions, virtual tables or similar items with each Connection as it is created.

enablesharedcache(enable: bool) None

If you use the same Connection across threads or use multiple connections accessing the same file, then SQLite can share the cache between them. It is not recommended that you use this.

exceptionfor(code: int) Exception

      If you would like to raise an exception that corresponds to a particular SQLite error code then call this function. It also understands extended error codes.

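For example, a quick sketch:

import apsw

exc = apsw.exceptionfor(apsw.SQLITE_BUSY)
print(type(exc).__name__)     # BusyError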
fork_checker() None

      Note This method is not available on Windows as it does not support the fork system call.

SQLite does not allow the use of database connections across forked processes

format_sql_value(value: SQLiteValue) str

      Returns a Python string representing the supplied value in SQL syntax.

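A few illustrative calls:

import apsw

apsw.format_sql_value(None)      # 'NULL'
apsw.format_sql_value(3)         # '3'
apsw.format_sql_value("it's")    # "'it''s'" - quoted and escaped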
initialize() None

      It is unlikely you will want to call this method as SQLite automatically initializes.

      Calls: sqlite3_initialize

keywords: Set[str]

      A set containing every SQLite keyword

      Calls:
log(errorcode: int, message: str) None

        Calls the SQLite logging interface. Note that you must format the message before passing it to this method:

        apsw.log(apsw.SQLITE_NOMEM, f"Need { needed } bytes of memory")
         

        Calls: sqlite3_log

memoryhighwater(reset: bool = False) int

        Returns the maximum amount of memory SQLite has used. If reset is True then the high water mark is reset to the current value.

memoryused() int

        Returns the amount of memory SQLite is currently using.

        See also

randomness(amount: int) bytes

        Gets random data from SQLite’s random number generator.

        Parameters
releasememory(amount: int) int

        Requests SQLite try to free amount bytes of memory. Returns how many bytes were freed.

        Calls: sqlite3_release_memory

shutdown() None

It is unlikely you will want to call this method and there is no need to do so. It is a really bad idea to call it unless you are absolutely sure all connections, blobs, cursors, vfs etc have been closed, deleted and garbage collected.

        Calls: sqlite3_shutdown

softheaplimit(limit: int) int

        Requests SQLite try to keep memory usage below amount bytes and returns the previous limit.

        Calls: sqlite3_soft_heap_limit64

sqlite3_sourceid() str

        Returns the exact checkin information for the SQLite 3 source being used.

        Calls: sqlite3_sourceid

sqlitelibversion() str

        Returns the version of the SQLite library. This value is queried at run time from the library so if you use shared libraries it will be the version in the shared library.

status(op: int, reset: bool = False) Tuple[int, int]

        Returns current and highwater measurements.

        Parameters
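For example, a sketch using one of the Status Parameters constants listed below:

import apsw

current, highwater = apsw.status(apsw.SQLITE_STATUS_MEMORY_USED)
print(current, highwater)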
using_amalgamation: bool

If True then SQLite amalgamation is in use (statically compiled into APSW). Using the amalgamation means that SQLite shared libraries are not used and will not affect your code.

vfsnames() List[str]

        Returns a list of the currently installed vfs. The first item in the list is the default vfs.

SQLite constants

SQLite has many constants used in various interfaces. To use a constant such as SQLITE_OK, just use apsw.SQLITE_OK.

The same values can be used in different contexts. For example SQLITE_OK and SQLITE_CREATE_INDEX both have a value of zero. For each group of constants there is also a mapping (dict) available that you can supply a string to and get the corresponding numeric value, or supply a numeric value and get the corresponding string. For example:

apsw.mapping_authorizer_function[20] == "SQLITE_READ"

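Both directions work, for example:

import apsw

apsw.mapping_authorizer_function["SQLITE_READ"]    # 20
apsw.mapping_authorizer_function[20]               # "SQLITE_READ"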
mapping_access: Dict[Union[str, int], Union[int, str]]

    Flags for the xAccess VFS method constants


    SQLITE_ACCESS_EXISTS, SQLITE_ACCESS_READ, SQLITE_ACCESS_READWRITE

mapping_authorizer_function: Dict[Union[str, int], Union[int, str]]

    Authorizer Action Codes constants


    SQLITE_ALTER_TABLE, SQLITE_ANALYZE, SQLITE_ATTACH, SQLITE_COPY, SQLITE_CREATE_INDEX, SQLITE_CREATE_TABLE, SQLITE_CREATE_TEMP_INDEX, SQLITE_CREATE_TEMP_TABLE, SQLITE_CREATE_TEMP_TRIGGER, SQLITE_CREATE_TEMP_VIEW, SQLITE_CREATE_TRIGGER, SQLITE_CREATE_VIEW, SQLITE_CREATE_VTABLE, SQLITE_DELETE, SQLITE_DETACH, SQLITE_DROP_INDEX, SQLITE_DROP_TABLE, SQLITE_DROP_TEMP_INDEX, SQLITE_DROP_TEMP_TABLE, SQLITE_DROP_TEMP_TRIGGER, SQLITE_DROP_TEMP_VIEW, SQLITE_DROP_TRIGGER, SQLITE_DROP_VIEW, SQLITE_DROP_VTABLE, SQLITE_FUNCTION, SQLITE_INSERT, SQLITE_PRAGMA, SQLITE_READ, SQLITE_RECURSIVE, SQLITE_REINDEX, SQLITE_SAVEPOINT, SQLITE_SELECT, SQLITE_TRANSACTION, SQLITE_UPDATE

mapping_authorizer_return: Dict[Union[str, int], Union[int, str]]

    Authorizer Return Codes constants


    SQLITE_DENY, SQLITE_IGNORE, SQLITE_OK

mapping_bestindex_constraints: Dict[Union[str, int], Union[int, str]]

    Virtual Table Constraint Operator Codes constants


    SQLITE_INDEX_CONSTRAINT_EQ, SQLITE_INDEX_CONSTRAINT_FUNCTION, SQLITE_INDEX_CONSTRAINT_GE, SQLITE_INDEX_CONSTRAINT_GLOB, SQLITE_INDEX_CONSTRAINT_GT, SQLITE_INDEX_CONSTRAINT_IS, SQLITE_INDEX_CONSTRAINT_ISNOT, SQLITE_INDEX_CONSTRAINT_ISNOTNULL, SQLITE_INDEX_CONSTRAINT_ISNULL, SQLITE_INDEX_CONSTRAINT_LE, SQLITE_INDEX_CONSTRAINT_LIKE, SQLITE_INDEX_CONSTRAINT_LIMIT, SQLITE_INDEX_CONSTRAINT_LT, SQLITE_INDEX_CONSTRAINT_MATCH, SQLITE_INDEX_CONSTRAINT_NE, SQLITE_INDEX_CONSTRAINT_OFFSET, SQLITE_INDEX_CONSTRAINT_REGEXP

mapping_config: Dict[Union[str, int], Union[int, str]]

    Configuration Options constants


    SQLITE_CONFIG_COVERING_INDEX_SCAN, SQLITE_CONFIG_GETMALLOC, SQLITE_CONFIG_GETMUTEX, SQLITE_CONFIG_GETPCACHE, SQLITE_CONFIG_GETPCACHE2, SQLITE_CONFIG_HEAP, SQLITE_CONFIG_LOG, SQLITE_CONFIG_LOOKASIDE, SQLITE_CONFIG_MALLOC, SQLITE_CONFIG_MEMDB_MAXSIZE, SQLITE_CONFIG_MEMSTATUS, SQLITE_CONFIG_MMAP_SIZE, SQLITE_CONFIG_MULTITHREAD, SQLITE_CONFIG_MUTEX, SQLITE_CONFIG_PAGECACHE, SQLITE_CONFIG_PCACHE, SQLITE_CONFIG_PCACHE2, SQLITE_CONFIG_PCACHE_HDRSZ, SQLITE_CONFIG_PMASZ, SQLITE_CONFIG_SCRATCH, SQLITE_CONFIG_SERIALIZED, SQLITE_CONFIG_SINGLETHREAD, SQLITE_CONFIG_SMALL_MALLOC, SQLITE_CONFIG_SORTERREF_SIZE, SQLITE_CONFIG_SQLLOG, SQLITE_CONFIG_STMTJRNL_SPILL, SQLITE_CONFIG_URI, SQLITE_CONFIG_WIN32_HEAPSIZE

mapping_conflict_resolution_modes: Dict[Union[str, int], Union[int, str]]

    Conflict resolution modes constants


    SQLITE_ABORT, SQLITE_FAIL, SQLITE_IGNORE, SQLITE_REPLACE, SQLITE_ROLLBACK

mapping_db_config: Dict[Union[str, int], Union[int, str]]

    Database Connection Configuration Options constants


    SQLITE_DBCONFIG_DEFENSIVE, SQLITE_DBCONFIG_DQS_DDL, SQLITE_DBCONFIG_DQS_DML, SQLITE_DBCONFIG_ENABLE_FKEY, SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, SQLITE_DBCONFIG_ENABLE_QPSG, SQLITE_DBCONFIG_ENABLE_TRIGGER, SQLITE_DBCONFIG_ENABLE_VIEW, SQLITE_DBCONFIG_LEGACY_ALTER_TABLE, SQLITE_DBCONFIG_LEGACY_FILE_FORMAT, SQLITE_DBCONFIG_LOOKASIDE, SQLITE_DBCONFIG_MAINDBNAME, SQLITE_DBCONFIG_MAX, SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE, SQLITE_DBCONFIG_RESET_DATABASE, SQLITE_DBCONFIG_TRIGGER_EQP, SQLITE_DBCONFIG_TRUSTED_SCHEMA, SQLITE_DBCONFIG_WRITABLE_SCHEMA

mapping_db_status: Dict[Union[str, int], Union[int, str]]

    Status Parameters for database connections constants


    SQLITE_DBSTATUS_CACHE_HIT, SQLITE_DBSTATUS_CACHE_MISS, SQLITE_DBSTATUS_CACHE_SPILL, SQLITE_DBSTATUS_CACHE_USED, SQLITE_DBSTATUS_CACHE_USED_SHARED, SQLITE_DBSTATUS_CACHE_WRITE, SQLITE_DBSTATUS_DEFERRED_FKS, SQLITE_DBSTATUS_LOOKASIDE_HIT, SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL, SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE, SQLITE_DBSTATUS_LOOKASIDE_USED, SQLITE_DBSTATUS_MAX, SQLITE_DBSTATUS_SCHEMA_USED, SQLITE_DBSTATUS_STMT_USED

mapping_device_characteristics: Dict[Union[str, int], Union[int, str]]

    Device Characteristics constants


    SQLITE_IOCAP_ATOMIC, SQLITE_IOCAP_ATOMIC16K, SQLITE_IOCAP_ATOMIC1K, SQLITE_IOCAP_ATOMIC2K, SQLITE_IOCAP_ATOMIC32K, SQLITE_IOCAP_ATOMIC4K, SQLITE_IOCAP_ATOMIC512, SQLITE_IOCAP_ATOMIC64K, SQLITE_IOCAP_ATOMIC8K, SQLITE_IOCAP_BATCH_ATOMIC, SQLITE_IOCAP_IMMUTABLE, SQLITE_IOCAP_POWERSAFE_OVERWRITE, SQLITE_IOCAP_SAFE_APPEND, SQLITE_IOCAP_SEQUENTIAL, SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN

mapping_extended_result_codes: Dict[Union[str, int], Union[int, str]]

    Extended Result Codes constants


    SQLITE_ABORT_ROLLBACK, SQLITE_AUTH_USER, SQLITE_BUSY_RECOVERY, SQLITE_BUSY_SNAPSHOT, SQLITE_BUSY_TIMEOUT, SQLITE_CANTOPEN_CONVPATH, SQLITE_CANTOPEN_DIRTYWAL, SQLITE_CANTOPEN_FULLPATH, SQLITE_CANTOPEN_ISDIR, SQLITE_CANTOPEN_NOTEMPDIR, SQLITE_CANTOPEN_SYMLINK, SQLITE_CONSTRAINT_CHECK, SQLITE_CONSTRAINT_COMMITHOOK, SQLITE_CONSTRAINT_DATATYPE, SQLITE_CONSTRAINT_FOREIGNKEY, SQLITE_CONSTRAINT_FUNCTION, SQLITE_CONSTRAINT_NOTNULL, SQLITE_CONSTRAINT_PINNED, SQLITE_CONSTRAINT_PRIMARYKEY, SQLITE_CONSTRAINT_ROWID, SQLITE_CONSTRAINT_TRIGGER, SQLITE_CONSTRAINT_UNIQUE, SQLITE_CONSTRAINT_VTAB, SQLITE_CORRUPT_INDEX, SQLITE_CORRUPT_SEQUENCE, SQLITE_CORRUPT_VTAB, SQLITE_ERROR_MISSING_COLLSEQ, SQLITE_ERROR_RETRY, SQLITE_ERROR_SNAPSHOT, SQLITE_IOERR_ACCESS, SQLITE_IOERR_AUTH, SQLITE_IOERR_BEGIN_ATOMIC, SQLITE_IOERR_BLOCKED, SQLITE_IOERR_CHECKRESERVEDLOCK, SQLITE_IOERR_CLOSE, SQLITE_IOERR_COMMIT_ATOMIC, SQLITE_IOERR_CONVPATH, SQLITE_IOERR_CORRUPTFS, SQLITE_IOERR_DATA, SQLITE_IOERR_DELETE, SQLITE_IOERR_DELETE_NOENT, SQLITE_IOERR_DIR_CLOSE, SQLITE_IOERR_DIR_FSYNC, SQLITE_IOERR_FSTAT, SQLITE_IOERR_FSYNC, SQLITE_IOERR_GETTEMPPATH, SQLITE_IOERR_LOCK, SQLITE_IOERR_MMAP, SQLITE_IOERR_NOMEM, SQLITE_IOERR_RDLOCK, SQLITE_IOERR_READ, SQLITE_IOERR_ROLLBACK_ATOMIC, SQLITE_IOERR_SEEK, SQLITE_IOERR_SHMLOCK, SQLITE_IOERR_SHMMAP, SQLITE_IOERR_SHMOPEN, SQLITE_IOERR_SHMSIZE, SQLITE_IOERR_SHORT_READ, SQLITE_IOERR_TRUNCATE, SQLITE_IOERR_UNLOCK, SQLITE_IOERR_VNODE, SQLITE_IOERR_WRITE, SQLITE_LOCKED_SHAREDCACHE, SQLITE_LOCKED_VTAB, SQLITE_NOTICE_RECOVER_ROLLBACK, SQLITE_NOTICE_RECOVER_WAL, SQLITE_OK_LOAD_PERMANENTLY, SQLITE_OK_SYMLINK, SQLITE_READONLY_CANTINIT, SQLITE_READONLY_CANTLOCK, SQLITE_READONLY_DBMOVED, SQLITE_READONLY_DIRECTORY, SQLITE_READONLY_RECOVERY, SQLITE_READONLY_ROLLBACK, SQLITE_WARNING_AUTOINDEX

mapping_file_control: Dict[Union[str, int], Union[int, str]]

    Standard File Control Opcodes constants


    SQLITE_FCNTL_BEGIN_ATOMIC_WRITE, SQLITE_FCNTL_BUSYHANDLER, SQLITE_FCNTL_CHUNK_SIZE, SQLITE_FCNTL_CKPT_DONE, SQLITE_FCNTL_CKPT_START, SQLITE_FCNTL_CKSM_FILE, SQLITE_FCNTL_COMMIT_ATOMIC_WRITE, SQLITE_FCNTL_COMMIT_PHASETWO, SQLITE_FCNTL_DATA_VERSION, SQLITE_FCNTL_EXTERNAL_READER, SQLITE_FCNTL_FILE_POINTER, SQLITE_FCNTL_GET_LOCKPROXYFILE, SQLITE_FCNTL_HAS_MOVED, SQLITE_FCNTL_JOURNAL_POINTER, SQLITE_FCNTL_LAST_ERRNO, SQLITE_FCNTL_LOCKSTATE, SQLITE_FCNTL_LOCK_TIMEOUT, SQLITE_FCNTL_MMAP_SIZE, SQLITE_FCNTL_OVERWRITE, SQLITE_FCNTL_PDB, SQLITE_FCNTL_PERSIST_WAL, SQLITE_FCNTL_POWERSAFE_OVERWRITE, SQLITE_FCNTL_PRAGMA, SQLITE_FCNTL_RBU, SQLITE_FCNTL_RESERVE_BYTES, SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE, SQLITE_FCNTL_SET_LOCKPROXYFILE, SQLITE_FCNTL_SIZE_HINT, SQLITE_FCNTL_SIZE_LIMIT, SQLITE_FCNTL_SYNC, SQLITE_FCNTL_SYNC_OMITTED, SQLITE_FCNTL_TEMPFILENAME, SQLITE_FCNTL_TRACE, SQLITE_FCNTL_VFSNAME, SQLITE_FCNTL_VFS_POINTER, SQLITE_FCNTL_WAL_BLOCK, SQLITE_FCNTL_WIN32_AV_RETRY, SQLITE_FCNTL_WIN32_GET_HANDLE, SQLITE_FCNTL_WIN32_SET_HANDLE, SQLITE_FCNTL_ZIPVFS

mapping_limits: Dict[Union[str, int], Union[int, str]]

    Run-Time Limit Categories constants


    SQLITE_LIMIT_ATTACHED, SQLITE_LIMIT_COLUMN, SQLITE_LIMIT_COMPOUND_SELECT, SQLITE_LIMIT_EXPR_DEPTH, SQLITE_LIMIT_FUNCTION_ARG, SQLITE_LIMIT_LENGTH, SQLITE_LIMIT_LIKE_PATTERN_LENGTH, SQLITE_LIMIT_SQL_LENGTH, SQLITE_LIMIT_TRIGGER_DEPTH, SQLITE_LIMIT_VARIABLE_NUMBER, SQLITE_LIMIT_VDBE_OP, SQLITE_LIMIT_WORKER_THREADS

mapping_locking_level: Dict[Union[str, int], Union[int, str]]

    File Locking Levels constants


    SQLITE_LOCK_EXCLUSIVE, SQLITE_LOCK_NONE, SQLITE_LOCK_PENDING, SQLITE_LOCK_RESERVED, SQLITE_LOCK_SHARED

mapping_open_flags: Dict[Union[str, int], Union[int, str]]

    Flags For File Open Operations constants


    SQLITE_OPEN_AUTOPROXY, SQLITE_OPEN_CREATE, SQLITE_OPEN_DELETEONCLOSE, SQLITE_OPEN_EXCLUSIVE, SQLITE_OPEN_EXRESCODE, SQLITE_OPEN_FULLMUTEX, SQLITE_OPEN_MAIN_DB, SQLITE_OPEN_MAIN_JOURNAL, SQLITE_OPEN_MEMORY, SQLITE_OPEN_NOFOLLOW, SQLITE_OPEN_NOMUTEX, SQLITE_OPEN_PRIVATECACHE, SQLITE_OPEN_READONLY, SQLITE_OPEN_READWRITE, SQLITE_OPEN_SHAREDCACHE, SQLITE_OPEN_SUBJOURNAL, SQLITE_OPEN_SUPER_JOURNAL, SQLITE_OPEN_TEMP_DB, SQLITE_OPEN_TEMP_JOURNAL, SQLITE_OPEN_TRANSIENT_DB, SQLITE_OPEN_URI, SQLITE_OPEN_WAL

mapping_prepare_flags: Dict[Union[str, int], Union[int, str]]

    Prepare Flags constants


    SQLITE_PREPARE_NORMALIZE, SQLITE_PREPARE_NO_VTAB, SQLITE_PREPARE_PERSISTENT

mapping_result_codes: Dict[Union[str, int], Union[int, str]]

    Result Codes constants


    SQLITE_ABORT, SQLITE_AUTH, SQLITE_BUSY, SQLITE_CANTOPEN, SQLITE_CONSTRAINT, SQLITE_CORRUPT, SQLITE_DONE, SQLITE_EMPTY, SQLITE_ERROR, SQLITE_FORMAT, SQLITE_FULL, SQLITE_INTERNAL, SQLITE_INTERRUPT, SQLITE_IOERR, SQLITE_LOCKED, SQLITE_MISMATCH, SQLITE_MISUSE, SQLITE_NOLFS, SQLITE_NOMEM, SQLITE_NOTADB, SQLITE_NOTFOUND, SQLITE_NOTICE, SQLITE_OK, SQLITE_PERM, SQLITE_PROTOCOL, SQLITE_RANGE, SQLITE_READONLY, SQLITE_ROW, SQLITE_SCHEMA, SQLITE_TOOBIG, SQLITE_WARNING

mapping_status: Dict[Union[str, int], Union[int, str]]

    Status Parameters constants


    SQLITE_STATUS_MALLOC_COUNT, SQLITE_STATUS_MALLOC_SIZE, SQLITE_STATUS_MEMORY_USED, SQLITE_STATUS_PAGECACHE_OVERFLOW, SQLITE_STATUS_PAGECACHE_SIZE, SQLITE_STATUS_PAGECACHE_USED, SQLITE_STATUS_PARSER_STACK, SQLITE_STATUS_SCRATCH_OVERFLOW, SQLITE_STATUS_SCRATCH_SIZE, SQLITE_STATUS_SCRATCH_USED

mapping_sync: Dict[Union[str, int], Union[int, str]]

    Synchronization Type Flags constants


    SQLITE_SYNC_DATAONLY, SQLITE_SYNC_FULL, SQLITE_SYNC_NORMAL

mapping_txn_state: Dict[Union[str, int], Union[int, str]]

    Allowed return values from [sqlite3_txn_state()] constants


    SQLITE_TXN_NONE, SQLITE_TXN_READ, SQLITE_TXN_WRITE

mapping_virtual_table_configuration_options: Dict[Union[str, int], Union[int, str]]

    Virtual Table Configuration Options constants


    SQLITE_VTAB_CONSTRAINT_SUPPORT, SQLITE_VTAB_DIRECTONLY, SQLITE_VTAB_INNOCUOUS

mapping_virtual_table_scan_flags: Dict[Union[str, int], Union[int, str]]

    Virtual Table Scan Flags constants


    SQLITE_INDEX_SCAN_UNIQUE

mapping_wal_checkpoint: Dict[Union[str, int], Union[int, str]]

    Checkpoint Mode Values constants


    SQLITE_CHECKPOINT_FULL, SQLITE_CHECKPOINT_PASSIVE, SQLITE_CHECKPOINT_RESTART, SQLITE_CHECKPOINT_TRUNCATE

mapping_xshmlock_flags: Dict[Union[str, int], Union[int, str]]

    Flags for the xShmLock VFS method constants


    SQLITE_SHM_EXCLUSIVE, SQLITE_SHM_LOCK, SQLITE_SHM_SHARED, SQLITE_SHM_UNLOCK

Backup

A backup object encapsulates copying one database to another. You call Connection.backup() on the destination database to get the Backup object. Call step() to copy some pages repeatedly dealing with errors as appropriate. Finally finish() cleans up committing or rolling back and releasing locks.

Here is an example usage using the with statement to ensure finish() is called:

# copies source.main into db
with db.backup("main", source, "main") as b:
    while not b.done:
        b.step(100)  # copy up to 100 pages per step
     

If you are not using with then you’ll need to ensure finish() is called:

# copies source.main into db
b = db.backup("main", source, "main")
try:
    while not b.done:
        b.step(100)
finally:
    b.finish()
     
Important details

    The database is copied page by page. This means that there is not a round trip via SQL. All pages are copied including free ones.

    The destination database is locked during the copy. You will get a ThreadingViolationError if you attempt to use it.

Backup class

    class Backup
    Backup.__enter__() Backup

You can use the backup object as a context manager as defined in PEP 0343. The __exit__() method ensures that backup is finished.

Backup.__exit__(etype: Optional[type[BaseException]], evalue: Optional[BaseException], etraceback: Optional[types.TracebackType]) Optional[bool]

Implements context manager in conjunction with __enter__() ensuring that the copy is finished.

Backup.close(force: bool = False) None

Does the same thing as finish(). This extra api is provided to give the same api as other APSW objects such as Connection.close(), Blob.close() and Cursor.close(). It is safe to call this method multiple times.

Backup.done: bool

A boolean that is True if the copy completed in the last call to step().

Backup.finish() None

Completes the copy process. If all pages have been copied then the transaction is committed on the destination database, otherwise it is rolled back. This method must be called for your backup to take effect.

Backup.pagecount: int

Read only. How many pages were in the source database after the last step. If you haven’t called step() or the backup object has been finished then zero is returned.

    Calls: sqlite3_backup_pagecount

Backup.remaining: int

Read only. How many pages were remaining to be copied after the last step. If you haven’t called step() or the backup object has been finished then zero is returned.

    Calls: sqlite3_backup_remaining

Backup.step(npages: int = -1) bool

Copies npages pages from the source to destination database. The source database is locked during the copy so using smaller values allows other access to the source database. The destination database is always locked until the backup object is finished.

    Parameters

npages – How many pages to copy. If the parameter is omitted or negative then all remaining pages are copied.

    Returns
True if this copied the last remaining outstanding pages, else false. This is the same value as done

    Calls: sqlite3_backup_step

Blob Input/Output

    A blob is a SQLite datatype representing a sequence of bytes. It can be zero or more bytes in size.


    An alternate approach to using blobs is to store the data in files and store the filename in the database. Doing so loses the ACID properties of SQLite.

zeroblob class

class zeroblob(size: int)

If you want to insert a blob into a row, you previously needed to supply the entire blob in one go. To read just one byte also required retrieving the blob in its entirety. For example to insert a 100MB file you would have needed to hold all 100MB in memory at once.

This class is used for the second way. Once a blob exists in the database, you then use the Blob class to read and write its contents.

Parameters

size – Number of zeroed bytes to create
zeroblob.length() int

    Size of zero blob in bytes.
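A sketch of reserving space at insert time (the table name is invented):

import apsw

con = apsw.Connection(":memory:")
con.execute("CREATE TABLE files(content)")
# reserve 100 zeroed bytes now; the real content can be written later via Blob
con.execute("INSERT INTO files VALUES(?)", (apsw.zeroblob(100),))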

Blob class

    class Blob
You cannot change the size of a blob so you need to create it with the correct size in advance either by using zeroblob or the zeroblob() function.

See the example.

    Blob.__enter__() Blob

You can use a blob as a context manager as defined in PEP 0343. When you use with statement, the blob is always closed on exit from the block, even if an exception occurred in the block.

    For example:

with connection.blobopen() as blob:
    blob.write(b"hello world")  # ... use the blob
Blob.__exit__(etype: Optional[type[BaseException]], evalue: Optional[BaseException], etraceback: Optional[types.TracebackType]) Optional[bool]

Implements context manager in conjunction with __enter__(). Any exception that happened in the with block is raised after closing the blob.

Blob.close(force: bool = False) None

    Closes the blob. Note that even if an error occurs the blob is still closed.

    Note

In some cases errors that technically occurred in the read() and write() routines may not be reported until close is called. Similarly errors that occurred in those methods (eg calling write() on a read-only blob) may also be re-reported in close(). (This behaviour is what the underlying SQLite APIs do - it is not APSW doing it.)

It is okay to call close() multiple times.

    Parameters

    force – Ignores any errors during close.

Blob.length() int

    Returns the size of the blob in bytes.

    Calls: sqlite3_blob_bytes

Blob.read(length: int = -1) bytes

Reads amount of data requested, or till end of file, whichever is earlier. Attempting to read beyond the end of the blob returns an empty bytes in the same manner as end of file on normal file objects.

Blob.readinto(buffer: Union[bytearray, array.array[Any], memoryview], offset: int = 0, length: int = -1) None

Reads from the blob into a buffer you have supplied. This method is useful if you already have a buffer like object that data is being assembled in, and avoids allocating results in Blob.read() and then copying into buffer.

Parameters

• buffer – A writable buffer like object. There is a bytearray type that is very useful. arrays also work.

• offset – The position to start writing into the buffer defaulting to the beginning.

• length – How much of the blob to read. The default is the remaining space left in the buffer. Note that if there is more space available than blob left then you will get a ValueError exception.

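A sketch of filling an existing buffer (the table and rowid are invented):

import apsw

con = apsw.Connection(":memory:")
con.execute("CREATE TABLE t(b)")
con.execute("INSERT INTO t(rowid, b) VALUES(1, zeroblob(10))")
buf = bytearray(10)
blob = con.blobopen("main", "t", "b", 1, False)   # False opens read-only
blob.readinto(buf)     # no intermediate bytes object is allocated
blob.close()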
Blob.reopen(rowid: int) None

Change this blob object to point to a different row. It can be faster than closing an existing blob and opening a new one.

    Calls: sqlite3_blob_reopen

Blob.seek(offset: int, whence: int = 0) None

    Changes current position to offset biased by whence.

    Parameters
Raises

ValueError – If the resulting offset is before the beginning (less than zero) or beyond the end of the blob.
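Assuming the conventional file whence values (0 from start, 1 relative to current, 2 from end), a sketch:

blob.seek(0)         # to the start
blob.seek(2, 1)      # forward two bytes from the current position
blob.seek(-4, 2)     # four bytes before the end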

Blob.tell() int

    Returns the current offset.

Blob.write(data: bytes) None

    Writes the data to the blob.

    Parameters
Raises

• TypeError – Wrong data type

• ValueError – If the data would go beyond the end of the blob. You cannot increase the size of a blob by writing beyond the end. You need to use zeroblob to set the desired size first when inserting the blob.
Building

    See version info to understand the relationship between Python, APSW, and SQLite versions.

setup.py

    Short story: You run setup.py but you should ideally follow the recommended way which will also fetch needed components for you.

python setup.py install test

Compiles APSW with the default Python compiler, installs it into your site library directory and then runs the test suite.

python setup.py install --user

Compiles APSW with default Python compiler and installs it into a subdirectory of your home directory. See PEP 370 for more details.

python setup.py build_ext --force --inplace test

    Compiles the extension but doesn’t install it. The test suite is then run.

python setup.py build --debug install

Compiles APSW with debug information. This also turns on assertions in SQLite.

Additional setup.py flags

    There are a number of APSW specific flags to commands you can specify.

fetch

setup.py can automatically fetch SQLite and other optional components. You can set the environment variable http_proxy to control proxy usage for the download.

    If any files are downloaded then the build step will automatically use them. This still applies when you do later builds without re-fetching.

--version=VERSION

By default the SQLite version corresponding to the APSW release is retrieved. You can also ask for a different version; setup parses the SQLite download page to work out the most recent version.

--missing-checksum-ok

    Allows setup to continue if the checksum is missing.

--all

    Gets all components listed below.

--sqlite

Automatically downloads the SQLite amalgamation. The amalgamation is the preferred way to use SQLite as you have total control over what components are included or excluded (see below) and have no dependencies on any existing libraries on your developer or deployment machines. The amalgamation includes the fts3/4/5, rtree, json1 and icu extensions. Any existing sqlite3/ directory will be erased and the downloaded code placed in a newly created sqlite3/ directory.

(setup only uses the page to determine the current version number - the SQLite download site URL is hard coded.)

If the URL is not listed in the checksums file then setup aborts. You can use --missing-checksum-ok to continue. You are recommended instead to update the checksums file with the correct information.

build/build_ext

You can enable or omit certain functionality by specifying flags to the build and/or build_ext commands of setup.py:

python setup.py build *options*

Note that the options do not accumulate. If you want to specify multiple enables or omits then you need to give the flag once, giving a comma separated list. For example:

python setup.py build --enable=fts3,fts3_parenthesis,rtree,icu

SQLite includes many options defined to the C compiler. If you want to change compiled in default values, or provide defines like SQLITE_CUSTOM_INCLUDE, then you can use --definevalues using = and comma separating. For example:

    python setup.py build_ext --definevalues SQLITE_DEFAULT_FILE_FORMAT=1,SQLITE_CUSTOM_INCLUDE=config.h
--enable-all-extensions

    Enables the STAT4, FTS3/4/5, RTree, JSON1, RBU, and ICU extensions if icu-config is on your path

--enable=fts3
--enable=fts4
--enable=fts5

Enables the full text search extension. ... legacy code (--enable-all-extensions turns it on).

--enable=rtree

Enables the spatial table extension. ... install.

--enable=rbu

Enables the resumable bulk update extension. ... install.

--enable=icu

Enables the International Components for Unicode extension. ... install.

--omit=ITEM

Causes various functionality to be omitted. For example --omit=load_extension will omit code to do with loading extensions. If using the amalgamation then this will omit the functionality from APSW and SQLite, otherwise the functionality will only be omitted from APSW (ie the code will still be in SQLite, APSW just won’t call it). In almost all cases you will need ...


    Matching APSW and SQLite options


APSW needs to see the same options as SQLite to correctly match it. For example if SQLite is compiled without loadable extensions, then APSW also needs to know that at compile time because the APIs won’t be present. Another example is Cursor.description_full needs to know if SQLITE_ENABLE_COLUMN_METADATA was defined when building SQLite for the same reason.


If you use the amalgamation (recommended configuration) then APSW and SQLite will see the same options and will be correctly in sync.


If you are using the system provided SQLite then specify --use-system-sqlite-config to build_ext, and the configuration will be automatically obtained (using ctypes find_library)


You can use the amalgamation and --use-system-sqlite-config simultaneously in which case the amalgamation will have an identical configuration to the system one. This is useful if you are using a newer SQLite version in the amalgamation, but still want to match the system.

Finding SQLite 3

SQLite 3 is needed during the build process. If you specify fetch --sqlite to the setup.py command line then it will automatically fetch the current version of the SQLite amalgamation. (The current version is determined by parsing the SQLite download page). You can manually specify the version, for example fetch --sqlite --version=3.39.4.

    These methods are tried in order:

    Amalgamation

... looked for. The SQLite code is then statically compiled into the APSW extension and is invisible to the rest of the process. There are no runtime library dependencies on SQLite as a result. When you use fetch this is where it places the downloaded amalgamation.

    Local build


    User directories

If specifying --user then your user directory is searched first. See PEP 370 for more details.

    System directories


    Note

If you compiled SQLite with any OMIT flags (eg SQLITE_OMIT_LOAD_EXTENSION) then you must include them in the setup.py command or file. For this example you could use setup.py build --omit=load_extension to add the same flags.

Source distribution (advanced)

If you want to make a source distribution or a binary distribution that creates an intermediate source distribution such as bdist_rpm then you can have the SQLite amalgamation automatically included as part of it.

Testing

    SQLite itself is extensively tested. It has considerably more code dedicated to testing than makes up the actual database functionality.

APSW includes tests which use the standard Python testing modules to verify correct operation. New code is developed alongside the tests. Reported issues also have test cases to ensure the issue doesn’t happen or doesn’t happen again:

$ python3 -m apsw.tests
                Python  /usr/bin/python3 sys.version_info(major=3, minor=10, micro=4, releaselevel='final', serial=0)
Testing with APSW file  /space/apsw/apsw/__init__.cpython-310-x86_64-linux-gnu.so
          APSW version  3.39.2.0
    SQLite lib version  3.39.2
SQLite headers version  3039002
         Using amalgamation  True
...............................................................................................
----------------------------------------------------------------------
Ran 95 tests in 25.990s
     
     OK
     
... running the test suite. The test suite is run multiple times to make any memory leaks or similar issues stand out. A checking version of Python is also used. See tools/valgrind.sh in the source. The same testing is also done with the compiler’s sanitizer option.

To ensure compatibility with the various Python versions, a script downloads and compiles all supported Python versions in both debug and release configurations (and 32 and 64 bit) against the APSW and SQLite supported versions running the tests. See tools/megatest.py in the source.

    In short both SQLite and APSW have a lot of testing!

Change History

3.40.0.0

Fixed regression in statement cache update (version 3.38.1-r1) where trailing whitespace in queries would be incorrectly treated as incomplete execution (APSW issue 376)

Added Various interesting and useful bits of functionality (APSW issue 369)

Added more Pythonic attributes as an alternative to getters and setters, including Connection.in_transaction, Connection.exectrace, Connection.rowtrace, Cursor.exectrace, Cursor.rowtrace, Cursor.connection (APSW issue 371)

Completed: To the extent permitted by CPython APIs every item has the same docstring as this documentation. Every API can use named parameters. The type stubs cover everything including constants. The type stubs also include documentation for everything, which for example Visual Studio Code displays as you type or hover. There is a single source of documentation in the source code, which is then automatically extracted to make this documentation, docstrings, and docstrings in the type stubs.

Example/Tour updated and appearance improved (APSW issue 367).

    3.39.4.0

Added Connection.cache_stats() to provide more information about the statement cache.

Cursor.execute() now uses sqlite3_prepare_v3 which allows supplying flags.

Cursor.execute() has a new can_cache parameter to control whether the query can use the statement cache. One example use is with authorizers because they only run during prepare, which doesn’t happen with already cached statements.

(The Cursor.execute() additional parameters are keyword only and also present in Cursor.executemany(), and the corresponding Connection.execute() and Connection.executemany() methods.)

Added Cursor.is_readonly, Cursor.is_explain, and Cursor.expanded_sql.

Updated processing named bindings so that types registered with collections.abc.Mapping (such as collections.UserDict) will also be treated as dictionaries. (APSW issue 373)

    3.39.3.0

Test no longer fails if APSW was compiled without SQLITE_ENABLE_COLUMN_METADATA but sqlite3 was separately compiled with it. APSW should be compiled with the same flags as sqlite3 to match functionality and APIs. (APSW issue 363)

--use-system-sqlite-config setup.py build_ext option added to allow Matching APSW and SQLite options. (APSW issue 364)

    3.39.2.1

PyPI now includes Python 3.11 builds.

Instead of using scripts, you can now run several tools directly:

• tests: python3 -m apsw.tests [options]

• tracer: python3 -m apsw.trace [options]

• speed tester: python3 -m apsw.speedtest [options]

• shell: python3 -m apsw [options]

The shell class has moved from apsw.Shell to apsw.shell.Shell (APSW issue 356). You can still reference it via the old name (ie existing code will not break, except on Python 3.6).

Shell: On Windows the native console support for colour is now used (previously a third party module was supported).

You can use --definevalues in setup.py build_ext to provide compiler defines used for configuring SQLite. (APSW issue 357)

If SQLITE_ENABLE_COLUMN_METADATA is enabled then Cursor.description_full is available providing all the column metadata available. (APSW issue 354)

Connection.cursor_factory attribute is now present and is used when Connection.cursor() is called. Added Connection.execute() and Connection.executemany() which automatically obtain the underlying cursor. See customizing connections and cursors in the Tips. (APSW issue 361)

    3.39.2.0

    Version numbering scheme change: Instead of a -r1 style suffix, there is .0 style suffix (APSW issue 340)

Updated building for PyPI to include more compiled platforms ...

    Added Connection.db_names() (APSW issue 343)

3.38.5-r1

    APSW is now on PyPI, so you can:

    pip install apsw
     

    Python 3.11 (APSW issue 326) now works.

    PyPy3 compiles and mostly works (APSW issue 323).

3.38.1-r1

All items now have full docstrings including type information. (Previously just one line summaries). Note the C implemented functions and data (ie almost all of APSW) can’t provide the same ...

  • SQLITE_INDEX_CONSTRAINT_OFFSET, SQLITE_INDEX_CONSTRAINT_LIMIT

3.37.0-r1

    Allow breaking of reference cycles between objects that contain a Connection or Cursor, and also use callbacks from that object (eg busy handler). (APSW issue 314)

  • SQLITE_CONSTRAINT_DATATYPE, SQLITE_OPEN_EXRESCODE

3.36.0-r1

    Implemented Connection.serialize() and Connection.deserialize(). They turn a database into bytes, and bytes into a database respectively.

  • SQLITE_FCNTL_EXTERNAL_READER, SQLITE_FCNTL_CKSM_FILE

3.35.4-r1

    Updates for SQLite download url (the year is part of the urls).

Added enable flag for built-in SQL math functions, and enable it by default with --enable-all-extensions.

    Use the newer buffer API for Python 3 (old API removed in Python 3.10).

3.34.0-r1

    Windows MSI installer files are now provided in addition to the exe files (APSW issue 294), as well as wheels for Python 3.6+. Python 3.9 binaries are also now available. The wheels can be installed via pip.

  • SQLITE_IOERR_CORRUPTFS

3.33.0-r1

    Small performance improvement in string handling

    apsw module exposes Cursor, Blob, and Backup types (APSW issue 273)

pkg-config is used to detect International Components for Unicode (ICU) sdk when the SQLite ICU extension is enabled.

  • SQLITE_OPEN_SUPER_JOURNAL

3.32.2-r1

    Added constants:

• SQLITE_IOERR_DATA, SQLITE_CORRUPT_INDEX, SQLITE_BUSY_TIMEOUT, SQLITE_FCNTL_CKPT_START, ...

    Minor documentation updates

3.31.1-r1

    Various updates due to year change

    Fix deprecated universal newline use in shell (APSW issue 283)

    Shell now uses pragma function_list to get list of functions for tab completion

SQLITE_FCNTL_CKPT_DONE, SQLITE_OPEN_NOFOLLOW, SQLITE_VTAB_DIRECTONLY

3.30.1-r1

    Added constants:

    • SQLITE_DBCONFIG_ENABLE_VIEW


      Updated hashing of SQL statements (APSW issue 274)

      Python 3.8 Windows binaries available.

3.29.0-r1

    Added constants:

3.28.0-r1

    Added constant:

    • SQLITE_DBCONFIG_WRITABLE_SCHEMA

3.27.2-r1

    Added constants:

    • SQLITE_CONFIG_MEMDB_MAXSIZE, SQLITE_FCNTL_SIZE_LIMIT


      Added support for the geopoly extension (APSW issue 253)

      Removed hash optimisation that isn’t useful any more (APSW issue 256)

3.26.0-r1

    Added constant:

    • SQLITE_DBCONFIG_DEFENSIVE

3.25.2-r1

    Added constants:

    • SQLITE_INDEX_CONSTRAINT_FUNCTION, SQLITE_CANTOPEN_DIRTYWAL, SQLITE_ERROR_SNAPSHOT, SQLITE_FCNTL_DATA_VERSION


      Shell output mode now has lines and columns for compatibility (APSW issue 214)

      Example now runs under both Python 2 and 3.

3.24.0-r1

    Added constants:

    • SQLITE_DBCONFIG_RESET_DATABASE, and support for it in Connection.config()


      Added keywords and updated the shell to use it.

      Python 3.7 Windows binaries are provided.

3.23.1-r1

    Added constants:

    • SQLITE_DBSTATUS_CACHE_SPILL, SQLITE_FCNTL_LOCK_TIMEOUT

3.22.0-r1

    Added constants:

    • SQLITE_DBCONFIG_TRIGGER_EQP, SQLITE_DBCONFIG_MAX

    • SQLITE_READONLY_CANTINIT, SQLITE_ERROR_RETRY, SQLITE_ERROR_MISSING_COLLSEQ, SQLITE_READONLY_DIRECTORY

3.21.0-r1

    Added constants:

• SQLITE_INDEX_CONSTRAINT_ISNULL, SQLITE_INDEX_CONSTRAINT_ISNOT, ...

    Many spelling fixes (thanks to Edward Betts for the review)

3.20.1-r1

    Added SQLITE_DBCONFIG_ENABLE_QPSG constant.

    Added shell .open command (APSW issue 240)

3.19.3-r1

    No APSW changes.

3.18.0-r1

    Updated completions in shell (eg added pragmas).

Resumable Bulk Update (RBU) extension is now built by default for --enable-all-extensions.

    Added Connection.set_last_insert_rowid().

3.17.0-r1

    No APSW changes.

3.16.2-r1

    Python 3.6 builds added.

    Added SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE and SQLITE_FCNTL_PDB constants.

3.15.2-r1

    No APSW changes.

3.15.1-r1

    Added SQLITE_FCNTL_WIN32_GET_HANDLE constant.

3.15.0-r1

    Added SQLITE_DBCONFIG_MAINDBNAME constant.

3.14.1-r1

    Added SQLITE_DBSTATUS_CACHE_USED_SHARED and SQLITE_OK_LOAD_PERMANENTLY constants.

3.13.0-r1

    Added SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION constant.

    Added a pip command line in the Download page.

3.12.2-r1

    Call PyUnicode_READY for Python 3.3 onwards. Fixes APSW issue 208, APSW issue 132, APSW issue 168.

SQLite 3.12 completely changed the semantics of VFS.xGetLastError() in an … constants.

    Added support for SQLITE_CONFIG_STMTJRNL_SPILL in apsw.config().

3.11.1-r1

    setup.py attempts to use setuptools if present, before falling back to distutils. This allows setuptools only commands such as bdist_wheel to work. You can force use of distutils by setting the environment variable APSW_FORCE_DISTUTILS to any value. Note that setuptools may also affect the output file names. (APSW issue 207)

3.11.0-r1

    The shell dump command now outputs the page size and user version. They were both output before as comments.

    Updated SQLite download logic for 2016 folder.


    Use SQLITE_ENABLE_API_ARMOR for extra error checking.

3.9.2-r1

    Added SQLITE_IOERR_VNODE constant.

    Windows builds for Python 3.5 are now provided.

3.8.11.1-r1

    Added SQLITE_FCNTL_RBU and SQLITE_FCNTL_ZIPVFS constants.

    setup’s fetch command can now get arbitrary fossil versions. For example specify fossil-e596a6b6.


    Adjusted some internal detection related to the fork checker

3.8.10.1-r1

    Added deterministic parameter to Connection.createscalarfunction() (APSW issue 187)

    Switched to new SQLite API returning 64 bit values for status() (APSW issue 191)

3.8.9-r1

    Fixed column description caching which could be preserved between multiple statements in the same execution (APSW issue 186)

Updated documentation building tool to use new database of information …

    Added mappings for conflict resolution modes, virtual table configuration options and xShmLock VFS flags.

3.8.8.2-r1

    No APSW changes.

3.8.8.1-r1

    The column description is now cached on first request during a query so getting it is quick if called for every row.

Added SQLITE_CONFIG_PCACHE_HDRSZ and SQLITE_CONFIG_PMASZ constants, and …

    Added SQLITE_CHECKPOINT_TRUNCATE constant.

    Update year in various places to 2015.

3.8.7.3-r1

    No APSW changes.

3.8.7.2-r1

    Fixed parsing of icu-config flags

3.8.7.1-r1

    Added SQLITE_LIMIT_WORKER_THREADS constant

3.8.6-r1

    Updated test suite for Python 3.4 unittest garbage collection changes (APSW issue 164 APSW issue 169)

Using the recommended build option of --enable-all-extensions turns on STAT4. Windows binaries include this too.

3.8.5-r1

    Added SQLITE_IOCAP_IMMUTABLE and SQLITE_FCNTL_WIN32_SET_HANDLE constants.

3.8.4.3-r1

    Added Cursor.fetchone()

3.8.4.2-r1

    No APSW code changes. Rebuild due to updated SQLite version.

3.8.4.1-r1

    Windows 64 bit binary builds for Python 3.3+ are back - thanks to Mike C. Fletcher for pointing the way

    Correct detection of current SQLite version from download page for setup.py fetch command

    Tested against Python 3.4 and binaries for Windows.

3.8.3.1-r1

    Updated Shell completions for keywords, functions and pragmas.

3.8.3-r1

    APSW is now hosted at Github - https://github.com/rogerbinns/apsw

    Added SQLITE_RECURSIVE, SQLITE_READONLY_DBMOVED, SQLITE_FCNTL_COMMIT_PHASETWO, SQLITE_FCNTL_HAS_MOVED and SQLITE_FCNTL_SYNC constants.

3.8.2-r1

    Added SQLITE_CONFIG_WIN32_HEAPSIZE, SQLITE_CONSTRAINT_ROWID and SQLITE_FCNTL_TRACE constants.

3.8.1-r1

    Added SQLITE_CANTOPEN_CONVPATH and SQLITE_IOERR_CONVPATH extended error codes.

    Updated pysqlite urls to point to github.

    Various minor build/download documentation updates.

3.8.0.2-r1

    No APSW code changes. Rebuild due to updated SQLite version.

    Updated documentation tips to show how to get detailed diagnostics.

3.8.0.1-r1

    No APSW changes. Rebuild due to updated SQLite version.

    Windows binaries for Python 3.3 64 bit are no longer available as a Visual Studio update obliterated the ability to compile them, and I have no patience left to fight Microsoft’s tools.

3.8.0-r2

    No APSW changes - updated checksums because SQLite changed the released archive to address an autoconf issue on some platforms

3.8.0-r1

    Windows binaries for Python 3.3 64 bit are now available after managing to get several pieces of Microsoft software to cooperate.

Fixed shell dump issue when system routines (eg timestamp, username, …

    Added SQLITE_DBSTATUS_DEFERRED_FKS, SQLITE_IOERR_GETTEMPPATH, SQLITE_WARNING_AUTOINDEX and SQLITE_BUSY_SNAPSHOT constants.

3.7.17-r1

    Removed tests that checked directly calling VFS read/write with negative offsets or amounts returns errors. This version of SQLite no longer returns errors in those circumstances and typically crashes instead.

    Various new constants.

3.7.16.2-r1

    No APSW changes - just a binary rebuild. Windows users are recommended to upgrade their SQLite version.

3.7.16.1-r1

    Updated tables of functions and pragmas in the Shell to match current SQLite version.

3.7.16-r1

    Adjust to different SQLite download URLs

    Added SQLITE_CONSTRAINT_* and SQLITE_READONLY_ROLLBACK extended error codes

    Removed CouchDB virtual table

3.7.15.2-r1

    No APSW changes - binary rebuild to pickup new SQLite version

3.7.15.1-r1

    Use https (SSL) for SQLite web site references (downloads and documentation links). On some platforms/versions/SSL libraries, Python’s SSL module doesn’t work with the SQLite website so a fallback to http is used - the downloads still have their checksum verified.

3.7.15-r1

    Work around changed semantics for error handling when the VFS xDelete method is asked to delete a file that does not exist.

Completely removed all AsyncVFS related code. This extension … SQLITE_FCNTL_TEMPFILENAME, SQLITE_CANTOPEN_FULLPATH, SQLITE_IOERR_DELETE_NOENT

3.7.14.1-r1

    Updated setup and test suite so that all files are explicitly closed instead of relying on garbage collection.

Added Windows binaries for Python 3.3. (Only 32 bit as Python doesn’t … shell can result in bad data or Python crashing. The bug has been fixed for Python 3.3.1 which is due in November 2012.

3.7.14-r2

Fixed an issue with the GIL in the destructor for functions. The bug would be encountered if you create a function with the same name as an existing function and are using an upcoming version of Python (eg … (APSW issue 134).

    Added shell .print command to match upcoming SQLite shell changes.

3.7.14-r1

    Added support for Connection.status() (calls sqlite3_db_status).

    The legacy Windows Compiled Help Format documentation is no longer produced - the help compiler setup program can’t cope with modern machines.

3.7.13-r1

    Do not free a structure on failure to register a virtual table module as SQLite does that anyway.

    Added SQLITE_OPEN_MEMORY constant.

3.7.12.1-r1

    No changes to APSW. Binary rebuilds due to SQLite bugfixes.

3.7.12-r1

    Re-enabled the asyncvfs.

    Added Cursor.description to make DB API interoperability a little easier (APSW issue 131).

    Added SQLITE_DBSTATUS_CACHE_WRITE and SQLITE_CANTOPEN_ISDIR constants.

3.7.11-r1

    Added SQLITE_ABORT_ROLLBACK and SQLITE_FCNTL_PRAGMA constants.

    Added Connection.readonly().

    Changed Connection.filename which used to return the string used to open the database and now returns the absolute pathname.

    Added Connection.db_filename().

3.7.10-r1

    The default sector size returned in VFS routines is 4,096 to match SQLite’s new default.

Several links to SQLite tickets and documentation were updated … VFS.xOpen() are exactly what was returned from VFS.xFullPathname().

3.7.9-r1

    Added SQLITE_DBSTATUS_CACHE_HIT, SQLITE_DBSTATUS_CACHE_MISS and SQLITE_FCNTL_OVERWRITE constants.

3.7.8-r1

    Updated documentation and tests due to an undocumented change in VFS xDelete semantics.

    Added SQLITE3_FCNTL_PERSIST_WAL and SQLITE3_FCNTL_WIN32_AV_RETRY file controls.

    Wrapped sqlite3_sourceid (APSW issue 120)

3.7.7.1-r1

Added SQLITE_CONFIG_URI and support for it in config(), and the open flag SQLITE_OPEN_URI. This makes it … and Python 2: The Python int type is returned for 64 bit integers instead of Python long type.

3.7.6.3-r1

When invoking the shell by calling apsw.shell.main() it will not become interactive if you supply SQL commands as command line arguments. This is to have the same behaviour as the SQLite shell (APSW issue 115).

… file automatically deducing separators, column names and data types.

    Detect attempted use of a cursor as input data for itself.

3.7.6.2-r1

Fixed APSW issue 117 where the shell could report an I/O error on changing output target for some operating systems. Thanks to Edzard Pasma for finding and diagnosing … more fine grained control over checkpointing and returns useful information.

3.7.5-r1

Backwards incompatible change in SQLite 3.7.5 for handling of xFileControl(). If you implement this method in a VFS then you must return True or False to indicate if the operation was … all.)

    Windows Python 3.2 binaries now available.

3.7.4-r1

    Binary downloads for Windows 64 bit Python versions 2.6 and above including Python 3 are now available.

    apsw.softheaplimit() now uses sqlite3_soft_heap_limit64 so you can provide values larger than 2GB. It is now also able to return the previous value instead of None.

    Improve getting shell timer information for 64 bit Windows.

Blob.reopen() is implemented.

FTS4 is enabled and in the binary builds. Note that it is an augmentation of FTS3 rather than totally separate code, as described in the SQLite documentation.

3.7.3-r1

You can read blobs into pre-existing buffers using Blob.readinto(). (This is more efficient than allocating new buffers as Blob.read() does and then copying.) (APSW issue 109).

    Fixed bug with unicode output in CSV mode in the shell.

sqlite3_create_function_v2 now means that some housekeeping APSW did can be pushed back onto SQLite, with the consequent deletion of some code

3.7.2-r1

    No changes to APSW. Upgrading to this version of SQLite is recommended.

3.7.1-r1

    Updated various constants including SQLITE_FCNTL_CHUNK_SIZE used with Connection.filecontrol().

    Fixed Unicode output with some file objects from the shell (APSW issue 108).

3.7.0.1-r1

    Fixed issue when using a tracer and a context manager fails to commit.

3.7.0-r1

    Added several new constants.

Write Ahead Logging is supported. You can make all databases automatically use … out the problem and providing test data.

The shell now does colour highlighting making it easy to visually distinguish prompts, errors, headers and value types when outputting to a terminal. See the --no-colour argument and .colour command. Those of you in the two countries that have not adopted the metric system may also omit the ‘u’. For Windows users you won’t get colour output unless you install colorama

… the statement cache was initialised which would result in a crash if any hooks executed SQL code.

3.6.23.1-r1

    Shell CSV output under Python 3.1 is corrected (work around Python 3.1 StringIO bug/incompatibility with other Python versions).

Simplified access to the shell’s database from the API.

    Added a shell example.

3.6.23-r1

    If setup is downloading files and an error occurs then it retries up to 5 times.

    Added SQLITE_CONFIG_LOG and SQLITE_OPEN_AUTOPROXY constants.


    Added log() to call the SQLite logging interface, and updated config() so you can set log destination function.

3.6.22-r1

Made it possible to run distutils ‘sdist’ from an already produced source that was made from ‘sdist’. This was necessary for some Python virtual package environments. Note that the recursive result does not … such as page size, encoding, auto_vacuum etc. The pragmas are commented out. APSW issue 90

3.6.21-r1

    Source and binary files are now digitally signed which means you can verify they have not been tampered with. See Verifying your download for instructions.


    Removed some unintentional logging code left in CouchDB virtual table code.

3.6.20-r1

    Support for Python 3.0 has been dropped as it has been end of lifed. Use Python 3.1 onwards.

Changes to how some statements are prepared to allow the new RANGE and … by itself.

3.6.19-r1

Backwards incompatible change: Fixed APSW issue 72 where APSW wasn’t zero basing virtual table BestIndex() constraints returned as documented. If you have working BestIndex code then you … been all along. You should now call apsw.complete() instead. (It even had an example showing it to be part of the module and not a specific connection!)

There is now an interactive shell very similar to that provided by SQLite. You can embed it in your own program, inherit from it to provide more commands and output modes, or just run it like this:
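The invocation example that followed here is elided by the diff hunk. A minimal sketch, assuming the apsw.shell.main() entry point named elsewhere in this changelog, might look like:

    # Hypothetical sketch - apsw.shell.main() reads sys.argv
    # just as the SQLite shell binary does
    import apsw.shell

    apsw.shell.main()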


    The setup.py file now has the various options available made applicable to appropriate commands only. Read the updated documentation.

You can now specify build --enable=stat2 to setup.py to enable advanced statistics gathering for query planning.

setup.py can automatically fetch the asyncvfs extension for you. If the source is present when APSW is built then it will be automatically included and async_initialize called.

A fork_checker() is available which turns on detection when you have used SQLite objects across a fork (a very bad thing). This is possible on Unix like operating systems, especially if you use the …

Extension loading is now compiled in by default when using the amalgamation and compiled out when using existing libraries. This is more likely to match your machine. You can use --omit=load_extension or --enable=load_extension to the build/build_ext commands to explicitly disable/enable extension loading. APSW issue 67

    setup.py will now abort on a download that has no checksum. See more information on checksums.

setup.py can also fetch the version of SQLite currently under development before a release. Use --version=fossil.

    Updated which code uses experimental SQLite APIs based on changes in SQLite. The test suite will also work correctly with experimental on or off. (It is on by default.)

3.6.18-r1

    The APSW license has been updated to allow you (at your option) to use any OSI approved license.

The speedtest has been updated to (optionally) use unicode … situations where it was not necessary. This results in the code executing a little faster.

3.6.17-r1

    APSW has migrated from Subversion to Mercurial for source code control. Hosting remains at Google Code

Updated a test due to VFS xUnlock errors now being ignored sometimes by SQLite (cvstrac 3946).

    The downloads page in the help didn’t mention the Windows Python 3.1 installer.

Running the test suite is now integrated into setup.py so you … Windows binary distribution.

    Various documentation updates.

3.6.16-r1

    Windows binary distribution includes Python 3.1.

    Trivial tweaks to keep MSVC happy.

3.6.15-r1

Fixed APSW issue 50 where Blob.read() was returning None on end of file instead of the documented (and correct) empty string/bytes.

    Corrected spelling of option in apswtrace and only output CURSORFROM if SQL tracing is on.

3.6.14.2-r1

Updated test code because SQLite 3.6.15 returns a different error code on trying to register a function with too many arguments (see cvstrac 3875).

3.6.14.1-r1

    Changed some internal symbol names so they won’t clash with similar new ones used by SQLite in the amalgamation.

Added apsw.using_amalgamation so you can tell if APSW was … we know it hasn’t been tampered with. (The --fetch-sqlite argument can be used to automatically download SQLite.)

3.6.13-r1

    Added SQLITE_LOCKED_SHAREDCACHE extended error code.

Updated tests as the VFS delete error handling code in SQLite now returns the same high level error code between Windows and …

    The CHM format help file produced by the Windows HTML Help Compiler is viewable again under Windows HTML Help Viewer.

3.6.11-r1

    You can now use the hot backup functionality introduced in SQLite 3.6.11.

Updated a VFS test to reflect changes in SQLite underlying error handling. (Previously SQLite almost always returned FullError … (reincarnated). That is no longer the case and you will get CursorClosedError.

3.6.10-r1

You can use the database as a context manager as defined in PEP 0343. When you use with a transaction is started. If the block finishes with an exception then the transaction is rolled back, otherwise it is committed. See Connection.__enter__() for an example.

… blocks can be nested. If you use Connection level execution tracers then they will be called with the savepoint SQL statements.

You can also use blobs as a context manager which ensures it is always closed when finished using it. See Blob.__enter__() for an example.

    Added constants:


The speedtest script will now fall back to the Python builtin sqlite3 module if it can’t find an externally installed pysqlite.

3.6.6.2-r1

    Windows binary download for Python 3.0 is available.

    Various changes in data structures and containers to reduce code size.

Changed the code to handle SQLite errors to only use Python … compatible with XP. Thanks to Rudolf Gaertner for assistance in detecting and diagnosing this issue.

Connections, cursors and blobs can be used by weak references.

    You can now install Connection wide execution and row tracers.

The callbacks for execution and row tracers have a different signature …

    Added a apswtrace script to allow easy SQL tracing without having to modify your code.

Revert to using older SQLite APIs in order to work around cvstrac 2158. (This also saves a little bit of SQLite memory usage). The user visible effect was that you could get different exceptions and error text depending on whether a query was already in the statement cache or if you were multi-threading. As an example, if you have a query that used an unknown collation then SQLite’s prepare returns SQLITE_ERROR with error text about the bad collation. If a query had already been prepared, the collation removed and then run, the new SQLite routines return SQLITE_SCHEMA and generic schema changed error text. Changing user defined functions could also cause a previously correct query to become invalid.

3.6.5-r1

The distribution now includes a speedtest script. You can use this to see how APSW performs relative to pysqlite, or to track performance differences between SQLite versions. The underlying … where things are implemented and to make automatic extraction of documentation easier.

3.6.3-r1

    You can now write your own Virtual File System (VFS) in Python. You can also inherit from an existing VFS making it easy to augment or override small bits of behaviour without having to code everything else. See the example where database files are obfuscated by XORing their contents.

setup.py now takes an optional --fetch-sqlite[=ver] argument to automatically download and use the latest SQLite amalgamation (or a specified version). On non-Windows platforms it will also work out what compile flags SQLite needs (for example HAVE_USLEEP, HAVE_LOCALTIME_R). Several other options to setup.py are also available to control enabling/omitting certain features and functionality. See building for further details.

    APSW checks that SQLite was compiled to be threadsafe

    Added new constants:

• SQLITE_IOERR_ACCESS, SQLITE_IOERR_CHECKRESERVEDLOCK and SQLITE_IOERR_LOCK extended result codes

• SQLITE_OPEN_NOMUTEX and SQLITE_OPEN_FULLMUTEX open flags

• Several new SQLITE_CONFIG and SQLITE_STATUS codes

    Wrapped several new SQLite apis:

… for numbers fitting in signed 32 bit. This only affects Python 2 as Python 3 uses long exclusively. Thanks to Joe Pham for reporting this as APSW issue 24

Added Connection.sqlite3pointer() method to help with APSW issue 26

3.5.9-r2

    APSW now works with Python 3 (you need 3.0b1 or later).

(APSW issue 17) Removed the SQLITE_MAX_* constants since they could be unreliable (eg APSW can’t tell what a shared library was compiled with). A workaround is documented in Connection.limit().

3.5.9-r1

    APSW is now hosted at https://code.google.com/p/apsw

    You can use this with SQLite 3.5.9 onwards.

    SQLite now provides the source all amalgamated into one file which improves performance and makes compilation and linking of SQLite far easier. The build instructions are updated.

SQLITE_COPY authorizer code and SQLITE_PROTOCOL error code are no longer used by SQLite, but the values are left in apsw for backwards compatibility

SQLITE_IOERR_DELETE, SQLITE_IOERR_BLOCKED and SQLITE_IOERR_NOMEM

    Connection.interrupt() can be called from any thread

SQLite has implementation limits on string and blob lengths (roughly constrained to fitting within a signed 32 bit integer - less than 2GB) … pysqlite (100). You can however specify more or less as needed.

    Connection.collationneeded() was implemented.

3.3.13-r1

    As of this release, APSW is now co-hosted with pysqlite meaning there is one site to go to for your Python SQLite bindings. (Both projects subsequently moved to Google Code.)

… cause misuse errors (internally SQLite started returning NULL pointers for those statements, and sqlite3_step didn’t like being passed the NULL pointer).

• Changed special handling of SQLITE_BUSY error to be the same as other errors. The special handling previously let you restart on receiving busy, but also hung onto statements which could result in other statements getting busy errors.

3.3.10-r1

    You can use this with SQLite 3.3.10 onwards.

    Added a statement cache that works in conjunction with the sqlite3_prepare_v2 API. A few issues were exposed in SQLite and hence you must use SQLite 3.3.10 or later.

3.3.9-r1

    You can use this with SQLite 3.3.9 onwards.

    SQLite added sqlite3_prepare_v2 API. The net effect of this API update is that you will not get SQLITE_SCHEMA any more. SQLite will handle it internally.

3.3.8-r1

    You can use this with SQLite 3.3.8 onwards. There was an incompatible API change for virtual tables in SQLite 3.3.8.

    Virtual tables updated for new api.

… also call close() on cursors, but it usually isn’t necessary.

    All strings are returned as unicode.

PyErr_WriteUnraisable was used for errors in destructors. Unfortunately it is almost completely useless, merely printing str of the object and exception. This doesn’t help in finding where in your code the issue arose so you could fix it. An internal APSW implementation generates a traceback and calls sys.excepthook(), the default implementation of which prints the exception and the traceback to sys.stderr.

    Note


Authorizer codes SQLITE_CREATE_VTABLE, SQLITE_DROP_VTABLE and SQLITE_FUNCTION added.

    SQLite extended result codes are available - see Exceptions for more detail.

apsw.connection_hooks added so you can easily register functions, virtual tables or similar items with each Connection as it is created.

    Added mapping dicts which makes it easy to map the various constants between strings and ints.

3.3.7-r1

    Never released as 3.3.8 came along.

You can use this release against SQLite 3.3.7. There were no changes in the SQLite 3.3.6 API from 3.3.5. In SQLite 3.3.7 an API was added …

  • You can load SQLite shared library extensions.

3.3.5-r1

You can use this release against any release of SQLite 3 from 3.3.5 onwards. A bug was also fixed when reporting an error during the cleanup of an aggregate function if there had also been an error in the step function. (PyErr_WriteUnraisable(NULL) crashed on some versions of Python but not others.)

SQLite added several functions for returning metadata about result column sets. You have to compile SQLite with SQLITE_ENABLE_COLUMN_METADATA to get them. This is not the default for SQLite. I don’t believe these are generally useful except in some corner cases and so they aren’t wrapped. However please shout if you do need them. Note that Cursor.getdescription() will already give you generally useful information. (Also see the pragmas)

The test code has been converted to use the unittest module. Run python tests.py -v to get the tests run. There should be no errors.

Updated code to work correctly with new Py_ssize_t introduced in Python 2.5. See 64 bit hosts, Python 2.5+ for more details on how Python and SQLite handle 64 bit sized items.

The following functions were added to SQLite and are wrapped. They are … long it took.

3.2.7-r1

    You can use this release against any release of SQLite 3.

SQLite 3.2.7 has several bug fixes. The undocumented experimental function sqlite3_profile was added, but it is not present in apsw yet.

    The author of pysqlite has improved it considerably since APSW was originally written. The differences section has been updated to reflect those improvements in pysqlite.

SQLITE_INTERNAL and SQLITE_NOTFOUND error codes are not used according to 3.2.7 header file. They are still present in APSW for backwards compatibility.

Changed the build instructions so configure is run on non-Windows …

    Changed when an error in the step function for an aggregate is reported due to limitations in SQLite.

3.2.2-r1

    You can use this release against any release of SQLite 3.

SQLite 3.2.2 API removed sqlite3_global_recover. That function was not wrapped in APSW. Note that SQLite 3.2.2 contains a bug fix that applies when you use 64 bit integer primary keys (32 bit ints are fine).

3.2.1-r1

    You can use this release against any release of SQLite 3.

    There are no changes in APSW except to correct an error in the example code (collations are registered against the connection not the cursor)

SQLite 3.2.1 had one addition in the stable C API, which was a new function named sqlite3_global_recover. That function is not applicable for wrapping in APSW.

3.1.3-r1

    You can use this release against any release of SQLite 3.

    The text string returned by apsw.Error used to say “apsw.APSWException” and has been changed to “apsw.Error”. This is purely cosmetic and helps make clear what the class is. (The old string was what the original class name was in an earlier version of the code.)

Added SQLITE_ALTER_TABLE and SQLITE_REINDEX constants for the authorizer function. (These constants were introduced in SQLite 3.1.3).

Changed various C++-isms into standard C (eg // comments and the placing of some CHECK_THREAD macro calls).

    Added module level function apswversion() which returns the version of APSW.

SQLite 3.1.3 had no changes in the stable C API other than what is … not wrapped by APSW. Please contact me if you believe they will remain in SQLite and you would like them wrapped:

• sqlite3_sleep An alternative function which sleeps for a specified number of milliseconds can be provided. By default SQLite just uses the standard operating system call.

• sqlite3_expired This function is internal to statement execution. It would apply to the implementation of Cursor.executemany() and could in theory provide a marginal improvement in performance.

• A global variable sqlite3_temp_directory can be used before any databases are opened to set where temporary files are created. By default SQLite just uses the standard operating system mechanisms.

3.0.8-r3

    There are no functional changes. The only changes were to correct some variable names in the example code (they were cut and pasted from the test code which used different names) and to make the source zip file extract its contents into a sub-directory which is the more typical way of packaging that sort of thing.

3.0.8-r2

    All remaining functionality in the C API for SQLite 3.0.8 is now available.

    Finished this documentation.

3.0.8-r1

    Initial release

Connections to a database

    A Connection encapsulates access to a database. You then use cursors to issue queries against the database.

    You can have multiple Connections open against the same database in the same process, across threads and in other processes.

Connection class

class Connection(filename: str, flags: int = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, vfs: Optional[str] = None, statementcachesize: int = 100)

    This object wraps a sqlite3 pointer.

    Opens the named database. You can use :memory: to get a private temporary in-memory database that is not shared with any other connections.

    Parameters
    • flags – One or more of the open flags orred together

• vfs – The name of the vfs to use. If None then the default vfs will be used.

• statementcachesize – Use zero to disable the statement cache, or a number larger than the total distinct SQL statements you …

      Connection.__enter__() Connection

You can use the database as a context manager as defined in PEP 0343. When you use with a transaction is started. If the block finishes with an exception then the transaction is rolled back, otherwise it is committed. For example:

with connection:
    connection.execute("....")
    with connection:
        # nested is supported
        call_function(connection)
        connection.execute("...")
        with connection as db:
            # You can also use 'as'
            call_function2(db)
            db.execute("...")

Behind the scenes the savepoint functionality introduced in …

Connection.__exit__(etype: Optional[type[BaseException]], evalue: Optional[BaseException], etraceback: Optional[types.TracebackType]) Optional[bool]

      Implements context manager in conjunction with __enter__(). Any exception that happened in the with block is raised after committing or rolling back the savepoint.

Connection.authorizer: Optional[Authorizer]

While preparing statements, SQLite will call any defined authorizer to see if a particular action is ok to be part of the statement.

Typical usage would be if you are running user supplied SQL and want to prevent harmful operations. You should also set the statementcachesize to zero.

The authorizer callback has 5 parameters:

• An operation code

• A string (or None) dependent on the operation (listed as 3rd)

• A string (or None) dependent on the operation (listed as 4th)

• A string name of the database (or None)

• Name of the innermost trigger or view doing the access (or None)

The authorizer callback should return one of SQLITE_OK, SQLITE_DENY or SQLITE_IGNORE. (SQLITE_DENY is returned if there is an error in your Python code).


      See also


      Calls: sqlite3_set_authorizer
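As an illustration (not from the original documentation; the database filename is hypothetical), an authorizer that refuses DELETE in user supplied SQL could look like:

    import apsw

    connection = apsw.Connection("app.db")  # hypothetical database

    def authorizer(operation, third, fourth, dbname, trigger_or_view):
        # Refuse any DELETE; allow everything else
        if operation == apsw.SQLITE_DELETE:
            return apsw.SQLITE_DENY
        return apsw.SQLITE_OK

    connection.authorizer = authorizer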

Connection.autovacuum_pages(callable: Optional[Callable[[str, int, int, int], int]]) None

      Calls callable to find out how many pages to autovacuum. The callback has 4 parameters:

Connection.backup(databasename: str, sourceconnection: Connection, sourcedatabasename: str) Backup

      Opens a backup object. All data will be copied from source database to this database.


      Calls: sqlite3_backup_init

Connection.blobopen(database: str, table: str, column: str, rowid: int, writeable: bool) Blob

      Opens a blob for incremental I/O.

      Parameters
    Return type
Blob

    Calls: sqlite3_blob_open
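A minimal incremental I/O sketch (the table and column names are hypothetical):

    import apsw

    con = apsw.Connection(":memory:")
    con.execute("CREATE TABLE files(content BLOB)")
    con.execute("INSERT INTO files VALUES(zeroblob(10))")
    rowid = con.last_insert_rowid()

    blob = con.blobopen("main", "files", "content", rowid, True)  # writeable
    blob.write(b"hello")
    blob.seek(0)
    print(blob.read())  # b"hello" followed by five zero bytes
    blob.close()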

Connection.cache_stats(include_entries: bool = False) Dict[str, int]

    Returns information about the statement cache as dict.


    Note

Calling execute with “select a; select b; insert into c …” will result in 3 cache entries corresponding to each of the 3 queries present.

The returned dictionary has the following information.

size: Maximum number of entries in the cache

evictions: How many entries were removed (expired) to make space for a newer entry

no_cache: Queries that had can_cache parameter set to False

hits: A match was found in the cache

misses: No match was found in the cache, or the cache couldn’t be used

no_vdbe: The statement was empty (eg a comment) or SQLite took action during parsing (eg some pragmas). These are not cached and also included in the misses count

too_big: UTF8 query size was larger than considered for caching. These are also included in the misses count.

max_cacheable_bytes: Maximum size of query (in bytes of utf8) that will be considered for caching

entries: (Only present if include_entries is True) A list of the cache entries

    If entries is present, then each list entry is a dict with the following information.

query: Text of the query itself (first statement only)

prepare_flags: Flags passed to sqlite3_prepare_v3 for this query

uses: How many times this entry has been (re)used

has_more: Boolean indicating if there was more query text than the first statement
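A short usage sketch:

    import apsw

    con = apsw.Connection(":memory:")
    con.execute("CREATE TABLE t(x)")
    con.execute("SELECT x FROM t")
    con.execute("SELECT x FROM t")  # second run should be a cache hit

    stats = con.cache_stats()
    print(stats["hits"], stats["misses"])

    # include_entries=True adds the per-entry list described above
    for entry in con.cache_stats(include_entries=True)["entries"]:
        print(entry["query"], entry["uses"])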

Connection.changes() int

    Returns the number of database rows that were changed (or inserted or deleted) by the most recently completed INSERT, UPDATE, or DELETE statement.

    Calls: sqlite3_changes64

Connection.close(force: bool = False) None

Closes the database. If there are any outstanding cursors, blobs or backups then they are closed too. It is normally not necessary to call this method as the database is automatically closed when there are no more references. It is ok to call the method multiple times.


    Calls: sqlite3_close

Connection.collationneeded(callable: Optional[Callable[[Connection, str], None]]) None

callable will be called if a statement requires a collation that hasn’t been registered. Your callable will be passed two parameters. The first is the connection object. The second is the name of the …

    Calls: sqlite3_collation_needed

Connection.config(op: int, *args: int) int

Parameters

Connection.createaggregatefunction(name: str, factory: Optional[AggregateFactory], numargs: int = -1) None

    Registers an aggregate function. Aggregate functions operate on all the relevant rows such as counting how many there are.


    Calls: sqlite3_create_function_v2
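A sketch of an aggregate function, assuming the factory contract where the factory returns a (context, step function, final function) triple:

    import apsw

    def longest_factory():
        # Called once per query; returns (context, step, final)
        def step(context, *args):
            for value in args:
                if isinstance(value, str) and len(value) > len(context["best"]):
                    context["best"] = value
        def final(context):
            return context["best"]
        return ({"best": ""}, step, final)

    con = apsw.Connection(":memory:")
    con.createaggregatefunction("longest", longest_factory)
    print(con.execute(
        "SELECT longest(column1) FROM (VALUES('a'),('bbb'),('cc'))"
    ).fetchone())  # ('bbb',)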

Connection.createcollation(name: str, callback: Optional[Callable[[str, str], int]]) None

    You can control how SQLite sorts (termed collation) when giving the COLLATE term to a SELECT. For example your collation could take into account locale or do numeric sorting.


    See also

    Calls: sqlite3_create_collation_v2
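A sketch of a collation that sorts in reverse of the default ordering:

    import apsw

    def reverse_order(a: str, b: str) -> int:
        # negative / zero / positive, like a C comparator, but inverted
        if a == b:
            return 0
        return -1 if a > b else 1

    con = apsw.Connection(":memory:")
    con.createcollation("reversed", reverse_order)
    rows = con.execute(
        "SELECT column1 FROM (VALUES('a'),('c'),('b')) "
        "ORDER BY column1 COLLATE reversed"
    ).fetchall()
    print(rows)  # [('c',), ('b',), ('a',)]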

Connection.createmodule(name: str, datasource: Any) None

    Registers a virtual table. See Virtual Tables for details.

    See also

    Calls: sqlite3_create_module_v2

Connection.createscalarfunction(name: str, callable: Optional[ScalarProtocol], numargs: int = -1, deterministic: bool = False) None

    Registers a scalar function. Scalar functions operate on one set of parameters once.

    Parameters
Connection.cursor_factory: Callable[[Connection], Any]

    Defaults to Cursor

Called with a Connection as the only parameter when a cursor is needed such as by the cursor() method, or Connection.execute().

Note that whatever is returned doesn’t have to be an actual Cursor instance, and just needs to have the methods present that are actually called. These are likely to be execute, executemany, close etc.

Connection.db_filename(name: str) str

    Returns the full filename of the named (attached) database. The main database is named “main”.

    Calls: sqlite3_db_filename

Connection.db_names() List[str]

    Returns the list of database names. For example the first database is named ‘main’, the next ‘temp’, and the rest with the name provided in ATTACH

    Calls: sqlite3_db_name

Connection.deserialize(name: str, contents: bytes) None

    Replaces the named database with an in-memory copy of contents. name is “main” for the main database, “temp” for the temporary database etc.


    Calls: sqlite3_deserialize

Connection.enableloadextension(enable: bool) None

    Enables/disables extension loading which is disabled by default.


    Calls: sqlite3_enable_load_extension

Connection.exectrace: Optional[ExecTracer]

Called with the cursor, statement and bindings for each execute() or executemany() on this Connection, unless the Cursor installed its own tracer. Your execution tracer can also abort execution of a statement.

If callable is None then any existing execution tracer is removed.
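A sketch of an execution tracer set via this attribute; returning False aborts the statement:

    import apsw

    def exec_tracer(cursor, statement, bindings):
        print("executing:", statement, bindings)
        return True  # False aborts execution of the statement

    con = apsw.Connection(":memory:")
    con.exectrace = exec_tracer
    con.execute("SELECT ?", (42,))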


    See also

Connection.execute(statements: str, bindings: Optional[Bindings] = None, *, can_cache: bool = True, prepare_flags: int = 0) Cursor
    +

Executes the statements using the supplied bindings. Execution returns when the first row is available or all statements have completed. (A cursor is automatically obtained).


    See Cursor.execute() for more details.

Connection.executemany(statements: str, sequenceofbindings: Sequence[Bindings], *, can_cache: bool = True, prepare_flags: int = 0) Cursor

This method is for when you want to execute the same statements over a sequence of bindings, such as inserting into a database. (A cursor is automatically obtained).


    See Cursor.executemany() for more details.
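A short sketch combining the two methods:

    import apsw

    con = apsw.Connection(":memory:")
    con.execute("CREATE TABLE points(x, y)")
    con.executemany("INSERT INTO points VALUES(?, ?)", [(1, 2), (3, 4), (5, 6)])
    for x, y in con.execute("SELECT x, y FROM points ORDER BY x"):
        print(x, y)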

Connection.filecontrol(dbname: str, op: int, pointer: int) bool

    Calls the xFileControl() method on the Virtual File System (VFS) implementing file access for the database.


    Calls: sqlite3_file_control

Connection.filename: str

    The filename of the database.

    Calls: sqlite3_db_filename

Connection.getautocommit() bool

    Returns if the Connection is in auto commit mode (ie not in a transaction).

    Calls: sqlite3_get_autocommit

Connection.getexectrace() Optional[ExecTracer]

Returns the currently installed execution tracer

Connection.getrowtrace() Optional[RowTracer]

Returns the currently installed row tracer

Connection.in_transaction: bool

    True if currently in a transaction, else False


    Calls: sqlite3_get_autocommit

Connection.interrupt() None

Causes any pending operations on the database to abort at the earliest opportunity. You can call this from any thread. For example you may have a long running query when the user presses the …

    Calls: sqlite3_interrupt

Connection.last_insert_rowid() int

    Returns the integer key of the most recent insert in the database.

    Calls: sqlite3_last_insert_rowid

Connection.limit(id: int, newval: int = -1) int

    If called with one parameter then the current limit for that id is returned. If called with two then the limit is set to newval.


    See also

    Calls: sqlite3_limit
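For example, querying and then lowering the maximum string length (a minimal sketch):

    import apsw

    con = apsw.Connection(":memory:")
    current = con.limit(apsw.SQLITE_LIMIT_LENGTH)    # read the limit
    con.limit(apsw.SQLITE_LIMIT_LENGTH, 1_000_000)   # then set a new one
    print("was", current, "now", con.limit(apsw.SQLITE_LIMIT_LENGTH))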

Connection.loadextension(filename: str, entrypoint: Optional[str] = None) None

    Loads filename as an extension

    Parameters
Connection.open_flags: int

    The integer flags used to open the database.

Connection.open_vfs: str

    The string name of the vfs used to open the database.

Connection.overloadfunction(name: str, nargs: int) None

    Registers a placeholder function so that a virtual table can provide an implementation via VTTable.FindFunction().

Due to cvstrac 3507 underlying errors will not be returned.

    Calls: sqlite3_overload_function

Connection.readonly(name: str) bool

    True or False if the named (attached) database was opened readonly or file permissions don’t allow writing. The main database is named “main”.

    An exception is raised if the database doesn’t exist.

    Calls: sqlite3_db_readonly

Connection.rowtrace: Optional[RowTracer]

Called with the cursor and row being returned for cursors associated with this Connection, unless the Cursor installed its own tracer. You can change the data that is returned or cause the row to be skipped altogether.

If callable is None then any existing row tracer is removed.


    See also

Connection.serialize(name: str) bytes

    Returns a memory copy of the database. name is “main” for the main database, “temp” for the temporary database etc.

The memory copy is the same as if the database was backed up to …

    Calls: sqlite3_serialize
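A sketch of a round trip through serialize() and deserialize():

    import apsw

    source = apsw.Connection(":memory:")
    source.execute("CREATE TABLE t(x); INSERT INTO t VALUES(99)")

    snapshot = source.serialize("main")   # bytes copy of the whole database

    clone = apsw.Connection(":memory:")
    clone.deserialize("main", snapshot)   # replace clone's main database
    print(clone.execute("SELECT x FROM t").fetchone())  # (99,)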

Connection.set_last_insert_rowid(rowid: int) None

    Sets the value calls to last_insert_rowid() will return.

    Calls: sqlite3_set_last_insert_rowid

Connection.setauthorizer(callable: Optional[Authorizer]) None

Sets the authorizer

Connection.setbusyhandler(callable: Optional[Callable[[int], bool]]) None

Sets the busy handler to callable. callable will be called with one integer argument which is the number of prior calls to the busy callback for the same lock. If the busy callback returns False, then SQLite returns SQLITE_BUSY to the calling code. If the callback returns True, then SQLite tries to open the table again and the cycle repeats.

If you previously called setbusytimeout() then …

    Calls: sqlite3_busy_handler

Connection.setbusytimeout(milliseconds: int) None

    If the database is locked such as when another connection is making changes, SQLite will keep retrying. This sets the maximum amount of time SQLite will keep retrying before giving up. If the database is @@ -768,9 +915,9 @@

    Calls: sqlite3_busy_timeout
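For example (a minimal sketch; the filename is hypothetical, and a shared on-disk database is assumed):

    import apsw

    con = apsw.Connection("shared.db")  # hypothetical filename
    con.setbusytimeout(500)  # retry for up to 500 milliseconds before giving up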

Connection.setcommithook(callable: Optional[CommitHook]) None

callable will be called just before a commit. It should return False for the commit to go ahead and True for it to be turned into a rollback. In the case of an exception in your callable, a …

    See also

    Calls: sqlite3_commit_hook

Connection.setexectrace(callable: Optional[ExecTracer]) → None

Method to set Connection.exectrace. callable is called with the cursor, statement and bindings for each execute() or executemany() on this Connection, unless the Cursor installed its own tracer. If callable is None then any existing execution tracer is removed.

Connection.setprofile(callable: Optional[Callable[[str, int], None]]) → None

Sets a callable which is invoked at the end of execution of each statement and passed the statement string and how long it took to execute. (The execution time is in nanoseconds.) Note that it is only called when the statement completes.

    Calls: sqlite3_profile
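
A sketch of a profiler (times are in nanoseconds):

def profiler(statement: str, time_ns: int) -> None:
    print(f"{ time_ns }ns: { statement }")

connection.setprofile(profiler)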

Connection.setprogresshandler(callable: Optional[Callable[[], bool]], nsteps: int = 20) → None

Sets a callable which is invoked every nsteps SQLite instructions. The callable should return True to abort or False to continue. (If there is an error in your Python callable then True/abort will be returned.)

    Calls: sqlite3_progress_handler

Connection.setrollbackhook(callable: Optional[Callable[[], None]]) → None

Sets a callable which is invoked during a rollback. If callable is None then any existing rollback hook is unregistered.

    The callable is called with no parameters and the return value is ignored.

    Calls: sqlite3_rollback_hook
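
A minimal sketch:

def on_rollback() -> None:
    print("transaction was rolled back")

connection.setrollbackhook(on_rollback)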

Connection.setrowtrace(callable: Optional[RowTracer]) → None

Method to set Connection.rowtrace. callable is called with the cursor and row being returned for cursors associated with this Connection, unless the Cursor installed its own tracer. You can change the data that is returned or cause the row to be skipped altogether. If callable is None then any existing row tracer is unregistered.

Connection.setupdatehook(callable: Optional[Callable[[int, str, str, int], None]]) → None

Calls callable whenever a row is updated, deleted or inserted. If callable is None then any existing update hook is unregistered. The update hook cannot make changes to the database while the query is still executing, but can record them for later use or apply them in a different connection.

    The update hook is called with 4 parameters:

type (int)
    SQLITE_INSERT, SQLITE_DELETE or SQLITE_UPDATE

database name (string)
    This is main for the database or the name specified in ATTACH

table name (string)
    The table being changed

rowid (64 bit integer)
    The rowid of the affected row

    See also

    Calls: sqlite3_update_hook

Connection.setwalhook(callable: Optional[Callable[[Connection, str, int], int]]) → None

callable will be called just after data is committed in Write Ahead Logging mode. It should return SQLITE_OK or an error code. The callback is called with 3 parameters:

  • The Connection

  • The database name

  • The number of pages in the WAL

Calls: sqlite3_wal_hook

Connection.sqlite3pointer() → int

Returns the underlying sqlite3 * for the connection. This method is useful if there are other C level libraries in the same process and you want them to use the APSW connection handle. The value is returned as a number using PyLong_FromVoidPtr under the hood. You should also ensure that you increment the reference count on the Connection for as long as the other libraries are using the pointer. It is also a very good idea to call sqlitelibversion() and ensure it is the same as the other libraries.
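
As a purely illustrative sketch, the value can be wrapped with ctypes before handing it to another library:

import ctypes

handle = ctypes.c_void_p(connection.sqlite3pointer())
# pass handle to the other library, keeping connection alive meanwhile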

Connection.status(op: int, reset: bool = False) → Tuple[int, int]

    Returns current and highwater measurements for the database.

    Parameters
  • op – A SQLITE_DBSTATUS constant indicating what is measured

  • reset – If True then the highwater value is reset to the current value

    Calls: sqlite3_db_status
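
A sketch using the page cache measure (SQLITE_DBSTATUS_CACHE_USED is one of the op constants):

current, highwater = connection.status(apsw.SQLITE_DBSTATUS_CACHE_USED)
print("page cache bytes in use", current, "highest", highwater)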

Connection.totalchanges() → int

Returns the total number of database rows that have been modified, inserted, or deleted since the database connection was opened.

    Calls: sqlite3_total_changes64
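
For example:

print(connection.totalchanges(), "rows changed since the connection was opened")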

Connection.txn_state(schema: Optional[str] = None) → int

Returns the current transaction state of the database, or of a specific schema if one is provided. ValueError is raised if schema is neither None nor a valid schema name. apsw.mapping_txn_state contains the names and values returned.

    Calls: sqlite3_txn_state
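
A sketch - the mapping contains both names and values:

state = connection.txn_state()
print(apsw.mapping_txn_state[state])  # eg SQLITE_TXN_NONE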

Connection.wal_autocheckpoint(n: int) → None

    Sets how often the Write Ahead Logging checkpointing is run.

    Parameters
  • n – A checkpoint is run whenever the write ahead log has this many or more pages

    Calls: sqlite3_wal_autocheckpoint

Connection.wal_checkpoint(dbname: Optional[str] = None, mode: int = apsw.SQLITE_CHECKPOINT_PASSIVE) → Tuple[int, int]

    Does a WAL checkpoint. Has no effect if the database(s) are not in WAL mode.

Parameters

  • dbname – Name of the attached database to checkpoint, or None for all databases

  • mode – One of the checkpoint mode constants such as apsw.SQLITE_CHECKPOINT_PASSIVE

Calls: sqlite3_wal_checkpoint_v2
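
Putting the WAL methods together - a sketch (the journal_mode pragma is standard SQLite):

connection.execute("pragma journal_mode=wal")
connection.wal_autocheckpoint(100)  # roughly every 100 pages
log_pages, checkpointed = connection.wal_checkpoint("main")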

Cursors (executing SQL)

A cursor encapsulates a SQL query and returning results. You only need an explicit cursor if you want more information or control over execution. Using Connection.execute() or Connection.executemany() will automatically obtain a cursor behind the scenes.

If you need a cursor you should call cursor() on your database:

    db=apsw.Connection("databasefilename")
     cursor=db.cursor()
     
    sql="insert into example values(?, ?)"
     cursor.execute(sql, ("string", 8390823904))
     
# You can also use dictionaries (with colon, $, or @ before names)
     sql="insert into example values(:title, :isbn)"
     cursor.execute(sql, {"title": "string", "isbn": 8390823904})
     
     

Cursors are cheap. Use as many as you need. It is safe to use them across threads, such as calling execute() in one thread, passing the cursor to another thread that then calls next. The only thing you can’t do is call methods at exactly the same time on the same cursor in two different threads - eg trying to call execute() in both at the same time, or execute() in one and next in another. (If you do attempt this, it will be detected and ThreadingViolationError will be raised.)

Behind the scenes a Cursor maps to a SQLite statement. APSW maintains a cache of them so that the mapping is fast and prepared statements are reused where possible.

    Note

SQLite fetches data as it is needed. If table example had 10 million rows it would only get the next row as requested. This code would not work as expected:

    for row in cursor.execute("select * from example"):
        cursor.execute("insert .....")
     
     
Cursor class

    class Cursor
Cursor.close(force: bool = False) → None

It is very unlikely you will need to call this method. It exists because older versions of SQLite required all Connection/Cursor activity to be confined to the same thread. That is no longer the case.

Cursor.connection: Connection

Connection this cursor is using

Cursor.description: Tuple[Tuple[str, str, None, None, None, None, None], ...]

    Based on the DB-API cursor property, this returns the same as getdescription() but with 5 Nones appended. See also APSW issue 131.

Cursor.description_full: Tuple[Tuple[str, str, str, str, str], ...]

Only present if SQLITE_ENABLE_COLUMN_METADATA was defined at compile time.

Returns all information about the query result columns. In addition to the name and declared type, you also get the database name, table name, and origin name.

Calls:

  • sqlite3_column_database_name

  • sqlite3_column_table_name

  • sqlite3_column_origin_name
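
A sketch, guarded since the attribute is only present when column metadata is enabled:

cursor = connection.execute("select x, y from point")
if hasattr(cursor, "description_full"):
    for name, decltype, dbname, table, origin in cursor.description_full:
        print(name, decltype, dbname, table, origin)
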
Cursor.exectrace: Optional[ExecTracer]

Called with the cursor, statement and bindings for each execute() or executemany() on this cursor.

If callable is None then any existing execution tracer is unregistered.
Cursor.execute(statements: str, bindings: Optional[Bindings] = None, *, can_cache: bool = True, prepare_flags: int = 0) → Cursor

    Executes the statements using the supplied bindings. Execution returns when the first row is available or all statements have completed.

Parameters

  • statements – One or more SQL statements such as select * from books or begin; insert into books ...; select last_insert_rowid(); end

  • bindings – If supplied should either be a sequence or a dictionary. Each item must be one of the supported types

  • can_cache – If False then the statement cache will not be used to find an already prepared query, nor will it be placed in the cache after execution

  • prepare_flags – flags passed to sqlite3_prepare_v3
    Raises
  • TypeError – The bindings supplied were neither a dict nor a sequence

    • BindingsError – You supplied too many or too few bindings for the statements

    • IncompleteExecutionError – There are remaining unexecuted queries from your last execute


    See also

    Calls:
  • sqlite3_prepare_v3

    • sqlite3_step

    • sqlite3_bind_int64

    • sqlite3_bind_null

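A sketch of the new keyword arguments; SQLITE_PREPARE_NO_VTAB is assumed to be exposed as an apsw constant mirroring the C flag:

cursor = connection.cursor()
# a one-off query - keep it out of the statement cache
cursor.execute("select * from point", can_cache=False)
# refuse statements that use virtual tables
cursor.execute("select 1", prepare_flags=apsw.SQLITE_PREPARE_NO_VTAB)
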
Cursor.executemany(statements: str, sequenceofbindings: Sequence[Bindings], *, can_cache: bool = True, prepare_flags: int = 0) → Cursor

      This method is for when you want to execute the same statements over a sequence of bindings. Conceptually it does this:

for binding in sequenceofbindings:
    cursor.execute(statements, binding)

See execute() for more information.

Cursor.expanded_sql: str

      The SQL text with bound parameters expanded. For example:

execute("select ?, ?", (3, "three"))

      would return:

select 3, 'three'

Note that while SQLite supports nulls in strings, their implementation of sqlite3_expanded_sql stops at the first null.


      Calls: sqlite3_expanded_sql

Cursor.fetchall() → list[Tuple[SQLiteValue, ...]]

      Returns all remaining result rows as a list. This method is defined in DBAPI. It is a longer way of doing list(cursor).

Cursor.getconnection() → Connection

Returns the Connection this cursor is using. An example usage is to get another cursor:

def func(cursor):
    # I don't want to alter existing cursor, so make a new one
    mycursor = cursor.getconnection().cursor()
    mycursor.execute("....")
Cursor.getdescription() → Tuple[Tuple[str, str], ...]

      If you are trying to get information about a table or view, then pragma table_info is better.
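
A sketch - each entry is the column name and its declared type:

cursor = connection.execute("select x, y from point")
for name, decltype in cursor.getdescription():
    print(name, decltype)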

Cursor.getexectrace() → Optional[ExecTracer]

Returns the currently installed execution tracer

      See also

Cursor.getrowtrace() → Optional[RowTracer]

        Returns the currently installed (via setrowtrace()) row tracer.

Cursor.is_explain: int

Returns 0 if executing a normal query, 1 if it is an EXPLAIN query, and 2 if an EXPLAIN QUERY PLAN query.

        Calls: sqlite3_stmt_isexplain

Cursor.is_readonly: bool

        Returns True if the current query does not change the database.


        Note that called functions, virtual tables etc could make changes though.


        Calls: sqlite3_stmt_readonly
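
A sketch showing both properties:

cursor = connection.cursor()
cursor.execute("explain query plan select * from point")
print(cursor.is_explain)   # 2
print(cursor.is_readonly)  # True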

Cursor.rowtrace: Optional[RowTracer]

Called with cursor and row being returned. You can change the data that is returned or cause the row to be skipped altogether.

If callable is None then any existing row tracer is unregistered.

Cursor.setexectrace(callable: Optional[ExecTracer]) → None

Sets the execution tracer

Cursor.setrowtrace(callable: Optional[RowTracer]) → None

Sets the row tracer

    @@ -465,20 +535,52 @@
    @@ -515,15 +617,15 @@
  • previous |
  • - +
    @@ -218,20 +226,20 @@ modules |
  • - next |
  • previous |
  • - +
    @@ -222,17 +230,17 @@ next |
  • - previous |
  • - + @@ -53,417 +55,942 @@
    -
    -

Example/Tour

This code demonstrates usage of APSW. It gives you a good overview of all the things that can be done. Also included is output so you can see what gets printed when you run the code.

    #!/usr/bin/env python3
     
    +from __future__ import annotations
    +
     import os
     import sys
     import time
     import apsw
    +import random
     
     # Note: this code uses Python's optional typing annotations.  You can
     # ignore them and do not need to use them
     from typing import Optional, Iterator, Tuple
    -
    -###
    -### Check we have the expected version of apsw and sqlite
    -###
    -
    -print("      Using APSW file", apsw.__file__)  # from the extension module
    -print("         APSW version", apsw.apswversion())  # from the extension module
    -print("   SQLite lib version", apsw.sqlitelibversion())  # from the sqlite library code
    -print("SQLite header version", apsw.SQLITE_VERSION_NUMBER)  # from the sqlite header file at compile time
     
    -
    |       Using APSW file /space/apsw/apsw/__init__.cpython-310-x86_64-linux-gnu.so
    -|          APSW version 3.39.2.0
    -|    SQLite lib version 3.39.2
    -| SQLite header version 3039002
    +
    +

    Checking APSW and SQLite versions

    +
    # Where the extension module is on the filesystem
    +print("      Using APSW file", apsw.__file__)
    +
    +# From the extension
    +print("         APSW version", apsw.apswversion())
    +
    +# From the sqlite header file at APSW compile time
    +print("SQLite header version", apsw.SQLITE_VERSION_NUMBER)
    +
    +# The SQLite code running
    +print("   SQLite lib version", apsw.sqlitelibversion())
    +
    +# If True then SQLite is incorporated into the extension.
    +# If False then a shared library is being used, or static linking
    +print("   Using amalgamation", apsw.using_amalgamation)
    +
    +
    +
          Using APSW file /space/apsw/./apsw/__init__.cpython-310-x86_64-linux-gnu.so
    +         APSW version 3.40.0.0
    +SQLite header version 3040000
    +   SQLite lib version 3.40.0
    +   Using amalgamation True
     
    -
    ###
    -### Opening/creating database
    -###
    -
    +
    +
    +

    Opening the database

    +

    You open the database by using Connection

    +
    # Default will create the database if it doesn't exist
     connection = apsw.Connection("dbfile")
    -cursor = connection.cursor()
    -
    -
    -
    ###
    -### simple statement
    -###
    -
    -cursor.execute("create table foo(x,y,z)")
    -
    -###
    -### using different types
    -###
    -
    -cursor.execute("insert into foo values(?,?,?)", (1, 1.1, None))  # integer, float/real, Null
    -cursor.execute("insert into foo(x) values(?)", ("abc", ))  # string (note trailing comma to ensure tuple!)
    -cursor.execute(
    -    "insert into foo(x) values(?)",  # a blob (binary data)
    -    (b"abc\xff\xfe", ))
    -
    -###
    -### multiple statements
    -###
    -
    -cursor.execute(
    -    "delete from foo; insert into foo values(1,2,3); create table bar(a,b,c) ; insert into foo values(4, 'five', 6.0)")
     
    -###
    -### iterator
    -###
    +# Open existing read-only
    +connection = apsw.Connection("dbfile", flags=apsw.SQLITE_OPEN_READONLY)
     
    -for x, y, z in cursor.execute("select x,y,z from foo"):
    -    print(cursor.getdescription())  # shows column names and declared types
    -    print(x, y, z)
    -
    -###
    -### iterator - multiple statements
    -###
    -
    -for m, n, o in cursor.execute("select x,y,z from foo ; select a,b,c from bar"):
    -    print(m, n, o)
    -
    -###
    -### bindings - sequence
    -###
    -
    -cursor.execute("insert into foo values(?,?,?)", (7, 'eight', False))
    -cursor.execute("insert into foo values(?,?,?1)", ('one', 'two'))  # nb sqlite does the numbers from 1
    -
    -###
    -### bindings - dictionary
    -###
    -
    -cursor.execute("insert into foo values(:alpha, :beta, :gamma)", {'alpha': 1, 'beta': 2, 'gamma': 'three'})
    +# Open existing read-write (exception if it doesn't exist)
    +connection = apsw.Connection("dbfile", flags=apsw.SQLITE_OPEN_READWRITE)
     
    -
    ###
    -### tracing execution
    -###
    +
    +
    +

    Executing SQL

    +

    Use Connection.execute() to execute SQL

    +
    connection.execute("create table point(x,y,z)")
    +connection.execute("insert into point values(1, 2, 3)")
    +# You can use multiple ; separated statements
    +connection.execute("""
    +    insert into point values(4, 5, 6);
    +    create table log(timestamp, event);
    +    create table foo(a, b, c);
    +    create table important(secret, data);
    +""")
     
    -def mytrace(cursor: apsw.Cursor, statement: str, bindings: Optional[apsw.Bindings]) -> bool:
    -    "Called just before executing each statement"
    -    print("SQL:", statement)
    -    if bindings:
    -        print("Bindings:", bindings)
    -    return True  # if you return False then execution is aborted
    +# read rows
    +for row in connection.execute("select * from point"):
    +    print(row)
    +
    +
    +
    (1, 2, 3)
    +(4, 5, 6)
    +
    +
    +
    +
    +

    Why you use bindings to provide values

    +

It is tempting to compose strings with the values in them, but it is easy to mangle the query, especially if values contain punctuation or unicode. This is known as SQL injection. Bindings are the correct way to supply values to queries.

    +
    # a simple value
    +event = "system started"
    +# DO NOT DO THIS
    +query = "insert into log values(0, '" + event + "')"
    +print("query:", query)
    +
    +# BECAUSE ... a bad guy could provide a value like this
    +event = "bad guy here') ; drop table important; -- "
    +# which has effects like this
    +query = "insert into log values(0, '" + event + "')"
    +print("bad guy:", query)
    +
    +
    +
    query: insert into log values(0, 'system started')
    +bad guy: insert into log values(0, 'bad guy here') ; drop table important; -- ')
    +
    +
    +
    +
    +

    Bindings (sequence)

    +

Bindings can be provided as a sequence such as with a tuple or list. Use ? to show where the values go.

    +
    query = "insert into log values(?, ?)"
    +data = (7, "restart")
    +connection.execute(query, data)
    +
    +# You can also use numbers after the ? to select
    +# values from the sequence.  Note that numbering
    +# starts at 1
    +query = "select ?1, ?3, ?2"
    +data = ("alpha", "beta", "gamma")
    +for row in connection.execute(query, data):
    +    print(row)
    +
    +
    +
    ('alpha', 'gamma', 'beta')
    +
    +
    +
    +
    +

    Bindings (dict)

    +

You can also supply bindings with a dictionary. Use :NAME, @NAME, or $NAME to provide the key name in the query. Names are case sensitive.

    +
    query = "insert into point values(:x, @Y, $z)"
    +data = {"x": 7, "Y": 8, "z": 9}
    +connection.execute(query, data)
    +
    +
    +
    +
    +

    Using different types

    +

SQLite supports None, int, float, str, and bytes (binary data). If a table declaration gives a type then SQLite attempts conversion. Read more.

    +
    connection.execute("""
    +    create table types1(a, b, c, d, e);
    +    create table types2(a INTEGER, b REAL, c TEXT, d, e BLOB);
    +    """)
    +
    +data = ("12", 3, 4, 5.5, b"\x03\x72\xf4\x00\x9e")
    +connection.execute("insert into types1 values(?,?,?,?,?)", data)
    +connection.execute("insert into types2 values(?,?,?,?,?)", data)
    +
    +for row in connection.execute("select * from types1"):
    +    print("types1", repr(row))
     
    -cursor.setexectrace(mytrace)
    -cursor.execute("drop table bar ; create table bar(x,y,z); select * from foo where x=?", (3, ))
    +for row in connection.execute("select * from types2"):
    +    print("types2", repr(row))
     
    -
    | SQL: drop table bar ;
    -| SQL:  create table bar(x,y,z);
    -| SQL:  select * from foo where x=?
    -| Bindings: (3,)
    +
    types1 ('12', 3, 4, 5.5, b'\x03r\xf4\x00\x9e')
    +types2 (12, 3.0, '4', 5.5, b'\x03r\xf4\x00\x9e')
     
    -
    ###
    -### tracing results
    -###
    +
    +
    +

    Transactions

    +

By default each statement is its own transaction (three in the example below). A transaction finishes by flushing data to storage and waiting for the operating system to confirm it is permanently there (ie it will survive a power failure), which takes a while.

    +
    connection.execute("insert into point values(2, 2, 2)")
    +connection.execute("insert into point values(3, 3, 3)")
    +connection.execute("insert into point values(4, 4, 4)")
    +
    +# You can use BEGIN / END to manually make a transaction
    +connection.execute("BEGIN")
    +connection.execute("insert into point values(2, 2, 2)")
    +connection.execute("insert into point values(3, 3, 3)")
    +connection.execute("insert into point values(4, 4, 4)")
    +connection.execute("END")
    +
+# Or use `with` that does it automatically
    +with connection:
    +    connection.execute("insert into point values(2, 2, 2)")
    +    connection.execute("insert into point values(3, 3, 3)")
    +    connection.execute("insert into point values(4, 4, 4)")
    +
    +# Nested transactions are supported
    +with connection:
    +    connection.execute("insert into point values(2, 2, 2)")
    +    with connection:
    +        connection.execute("insert into point values(3, 3, 3)")
    +        connection.execute("insert into point values(4, 4, 4)")
    +
    +
    +
    +
    +

    executemany

    +

You can execute the same SQL against a sequence using Connection.executemany()

    +
    data = (
    +    (1, 1, 1),
    +    (2, 2, 2),
    +    (3, 3, 3),
    +    (4, 4, 4),
    +    (5, 5, 5),
    +)
    +query = "insert into point values(?,?,?)"
    +
    +# we do it in a transaction
    +with connection:
    +    # the query is run for each item in data
    +    connection.executemany(query, data)
    +
    +
    +
    +
    +

    Tracing execution

    +

You can trace execution of SQL statements. See more about tracing.

    +
    def my_tracer(cursor: apsw.Cursor, statement: str, bindings: Optional[apsw.Bindings]) -> bool:
    +    "Called just before executing each statement"
    +    print("SQL:", statement.strip())
    +    print("Bindings:", bindings)
    +    return True  # if you return False then execution is aborted
    +
     
    -def rowtrace(cursor: apsw.Cursor, row: apsw.SQLiteValues) -> apsw.SQLiteValues:
    +# you can trace a single cursor
    +cursor = connection.cursor()
    +cursor.exectrace = my_tracer
    +cursor.execute(
    +    """
    +        drop table if exists bar;
    +        create table bar(x,y,z);
    +        select * from point where x=?;
    +        """, (3, ))
    +
    +# if set on a connection then all cursors are traced
    +connection.exectrace = my_tracer
    +# and clearing it
    +connection.exectrace = None
    +
    +
    +
    SQL: drop table if exists bar;
    +Bindings: ()
    +SQL: create table bar(x,y,z);
    +Bindings: ()
    +SQL: select * from point where x=?;
    +Bindings: (3,)
    +
    +
    +
    +
    +

    Tracing returned rows

    +

You can trace returned rows, including modifying what is returned or skipping a row completely. See more about tracing.

    +
    def row_tracer(cursor: apsw.Cursor, row: apsw.SQLiteValues) -> apsw.SQLiteValues:
         """Called with each row of results before they are handed off.  You can return None to
         cause the row to be skipped or a different set of values to return"""
         print("Row:", row)
         return row
     
    -cursor.setrowtrace(rowtrace)
    -for row in cursor.execute("select x,y from foo where x>3"):
    +
    +# you can trace a single cursor
    +cursor = connection.cursor()
    +cursor.rowtrace = row_tracer
    +for row in cursor.execute("select x,y from point where x>4"):
         pass
    +
    +# if set on a connection then all cursors are traced
    +connection.rowtrace = row_tracer
    +# and clearing it
    +connection.rowtrace = None
     
    -
    | SQL: select x,y from foo where x>3
    -| Row: (4, 'five')
    -| Row: (7, 'eight')
    -| Row: ('one', 'two')
    -
    -
    -
    # Clear tracers
    -cursor.setrowtrace(None)
    -cursor.setexectrace(None)
    -
    -###
    -### executemany
    -###
    -
    -# (This will work correctly with multiple statements, as well as statements that
    -# return data.  The second argument can be anything that is iterable.)
    -cursor.executemany("insert into foo (x) values(?)", ([1], [2], [3]))
    -
    -# You can also use it for statements that return data
    -for row in cursor.executemany("select * from foo where x=?", ([1], [2], [3])):
    -    print(row)
    +
    Row: (7, 8)
    +Row: (5, 5)
     
    -
    ###
    -### defining your own functions
    -###
    -
    -def ilove7(*args: apsw.SQLiteValue) -> int:
    -    "a scalar function"
    -    print("ilove7 got", args, "but I love 7")
    +
    +
    +

    Defining your own functions

    +

Scalar functions take one or more values and return one value. They are registered by calling Connection.createscalarfunction().

    +
    def ilove7(*args: apsw.SQLiteValue) -> int:
    +    "A scalar function"
    +    print(f"ilove7 got { args } but I love 7")
         return 7
     
    +
     connection.createscalarfunction("seven", ilove7)
     
    -for row in cursor.execute("select seven(x,y) from foo"):
    -    print(row)
    +for row in connection.execute("select seven(x,y) from point where x>4"):
    +    print("row", row)
     
    -
    | ilove7 got (1, 2) but I love 7
    -| (7,)
    -| ilove7 got (4, 'five') but I love 7
    -| (7,)
    -| ilove7 got (7, 'eight') but I love 7
    -| (7,)
    -| ilove7 got ('one', 'two') but I love 7
    -| (7,)
    -| ilove7 got (1, 2) but I love 7
    -| (7,)
    -| ilove7 got (1, None) but I love 7
    -| (7,)
    -| ilove7 got (2, None) but I love 7
    -| (7,)
    -| ilove7 got (3, None) but I love 7
    -| (7,)
    -
    -
    -
    ###
    -### aggregate functions are more complex
    -###
    -
    -# Here we return the longest item when represented as a string.
    -
    -class longest:
    +
    ilove7 got (7, 8) but I love 7
    +row (7,)
    +ilove7 got (5, 5) but I love 7
    +row (7,)
    +
    +
    +
    +
    +

    Defining aggregate functions

    +

Aggregate functions are called multiple times with matching rows, and then provide a final value. An example is calculating an average. They are registered by calling Connection.createaggregatefunction().

    +
    class longest:
    +    # Find which value when represented as a string is
    +    # the longest
     
         def __init__(self) -> None:
             self.longest = ""
     
         def step(self, *args: apsw.SQLiteValue) -> None:
    +        # Called with each matching row
             for arg in args:
                 if len(str(arg)) > len(self.longest):
                     self.longest = str(arg)
     
         def final(self) -> str:
    +        # Called at the very end
             return self.longest
     
         @classmethod
         def factory(cls) -> apsw.AggregateCallbacks:
             return cls(), cls.step, cls.final
     
    +
     connection.createaggregatefunction("longest", longest.factory)
    -for row in cursor.execute("select longest(x,y) from foo"):
    +for row in connection.execute("select longest(event) from log"):
         print(row)
     
    -
    | ('eight',)
    +
    ('restart',)
     
    -
    ###
    -### Defining collations.
    -###
    +
    +
    +

    Defining collations (sorting)

    +

How you sort can depend on the languages or values involved. You register a collation by calling Connection.createcollation().

    +
+# This example sorting mechanism understands some text followed by a
+# number and ensures the number portion gets sorted correctly
    +
    +connection.execute("create table names(name)")
    +connection.executemany("insert into names values(?)", (
    +    ("file1", ),
    +    ("file7", ),
    +    ("file17", ),
    +    ("file20", ),
    +    ("file3", ),
    +))
     
    -# The default sorting mechanisms don't understand numbers at the end of strings
    -# so here we define a collation that does
    +print("Standard sorting")
    +for row in connection.execute("select * from names order by name"):
    +    print(row)
     
    -cursor.execute("create table s(str)")
    -cursor.executemany("insert into s values(?)", (["file1"], ["file7"], ["file17"], ["file20"], ["file3"]))
     
    -for row in cursor.execute("select * from s order by str"):
    -    print(row)
    -
    -
    -
    | ('file1',)
    -| ('file17',)
    -| ('file20',)
    -| ('file3',)
    -| ('file7',)
    -
    -
    -
    def strnumcollate(s1: apsw.SQLiteValue, s2: apsw.SQLiteValue) -> int:
    +def str_num_collate(s1: apsw.SQLiteValue, s2: apsw.SQLiteValue) -> int:
         # return -1 if s1<s2, +1 if s1>s2 else 0
     
    -    # split values into two parts - the head and the numeric tail
    -    values: list[tuple[str, int]] = [(str(s1), 0), (str(s2), 0)]
    -    for vn, v in enumerate(values):
    -        i = len(v[0])
    -        for i in range(len(v[0]), 0, -1):
    -            if v[0][i - 1] not in "01234567890":
    -                break
    -        try:
    -            v = (v[0][:i], int(v[0][i:]))
    -            values[vn] = v
    -        except ValueError:
    -            pass
    +    def parts(v: str) -> tuple[str, int]:
    +        num = ""
    +        while v and v[-1].isdigit():
    +            num = v[-1] + num
    +            v = v[:-1]
    +        return v, int(num) if num else 0
    +
    +    ps1 = parts(str(s1))
    +    ps2 = parts(str(s2))
    +
         # compare
    -    if values[0] < values[1]:
    +    if ps1 < ps2:
             return -1
    -    if values[0] > values[1]:
    +    if ps1 > ps2:
             return 1
         return 0
     
    -connection.createcollation("strnum", strnumcollate)
     
    -for row in cursor.execute("select * from s order by str collate strnum"):
    +connection.createcollation("strnum", str_num_collate)
    +
    +print()
    +print("Using strnum")
    +for row in connection.execute("select * from names order by name collate strnum"):
         print(row)
     
    -
    | ('file1',)
    -| ('file3',)
    -| ('file7',)
    -| ('file17',)
    -| ('file20',)
    +
    Standard sorting
    +('file1',)
    +('file17',)
    +('file20',)
    +('file3',)
    +('file7',)
    +
    +Using strnum
    +('file1',)
    +('file3',)
    +('file7',)
    +('file17',)
    +('file20',)
    +
    +
    +
    +
    +

    Accessing results by column name

    +

You can access results by column name instead of by number using dataclasses. APSW provides apsw.ext.DataClassRowFactory to do this.

    +
    import apsw.ext
    +
    +connection.execute("""
    +    create table books(id, title, author, year);
    +    insert into books values(7, "Animal Farm", "George Orwell", 1945);
    +    insert into books values(37, "The Picture of Dorian Gray", "Oscar Wilde", 1890);
    +    """)
    +
    +# Normally you use column numbers
    +for row in connection.execute("select title, id, year from books where author=?", ("Oscar Wilde", )):
    +    # this is very fragile
    +    print("title", row[0])
    +    print("id", row[1])
    +    print("year", row[2])
    +
    +# Turn on dataclasses - frozen makes them read-only
    +connection.rowtrace = apsw.ext.DataClassRowFactory(dataclass_kwargs={"frozen": True})
    +
    +print("\nNow with dataclasses\n")
    +
    +# Same query - note using AS to set column name
    +for row in connection.execute(
    +        """SELECT title,
    +           id AS book_id,
    +           year AS book_year
    +           FROM books WHERE author = ?""", ("Oscar Wilde", )):
    +    print("title", row.title)
    +    print("id", row.book_id)
    +    print("year", row.book_year)
    +
    +# clear
    +connection.rowtrace = None
    +
    +
    +
    title The Picture of Dorian Gray
    +id 37
    +year 1890
    +
    +Now with dataclasses
    +
    +title The Picture of Dorian Gray
    +id 37
    +year 1890
     
    -
    ###
    -### Authorizer (eg if you want to control what user supplied SQL can do)
    -###
    +
    +
    +

    Type conversion into/out of database

    +

You can use apsw.ext.TypesConverterCursorFactory to do conversion, both for types you define and for other existing types.

    +
    import apsw.ext
    +
    +registrar = apsw.ext.TypesConverterCursorFactory()
    +connection.cursor_factory = registrar
    +
    +
    +# A type we define - deriving from SQLiteTypeAdapter automatically registers conversion
    +# to a SQLite value
    +class Point(apsw.ext.SQLiteTypeAdapter):
    +
    +    def __init__(self, x, y):
    +        self.x = x
    +        self.y = y
    +
    +    def __repr__(self) -> str:
    +        return f"Point({ self.x }, { self.y })"
    +
    +    def __eq__(self, other: Point) -> bool:
    +        return isinstance(other, Point) and self.x == other.x and self.y == other.y
    +
    +    def to_sqlite_value(self) -> str:
    +        # called to convert Point into something SQLite supports
    +        return f"{ self.x };{ self.y }"
    +
    +    # This converter will be registered
    +    @staticmethod
    +    def convert_from_sqlite(value: str) -> Point:
    +        return Point(*(float(part) for part in value.split(";")))
     
    -def authorizer(operation: int, paramone: Optional[str], paramtwo: Optional[str], databasename: Optional[str], triggerorview: Optional[str]) -> int:
    +
    +# An existing type
    +def complex_to_sqlite_value(c: complex) -> str:
    +    return f"{ c.real }+{ c.imag }"
    +
    +
    +# ... requires manual registration
    +registrar.register_adapter(complex, complex_to_sqlite_value)
    +
    +# conversion from a SQLite value requires registration
    +registrar.register_converter("POINT", Point.convert_from_sqlite)
    +
    +
    +# ... and for complex
    +def sqlite_to_complex(v: str) -> complex:
    +    return complex(*(float(part) for part in v.split("+")))
    +
    +
    +registrar.register_converter("COMPLEX", sqlite_to_complex)
    +
    +# note that the type names are case sensitive and must match the
    +# registration
    +connection.execute("create table conversion(p POINT, c COMPLEX)")
    +
    +# convert going into database
    +test_data = (Point(5.2, 7.6), 3 + 4j)
    +connection.execute("insert into conversion values(?, ?)", test_data)
    +print("inserted", test_data)
    +
    +# and coming back out
    +for row in connection.execute("select * from conversion"):
    +    print("back out", row)
    +    print("equal", row == test_data)
    +
    +# clear registrar
    +connection.cursor_factory = apsw.Cursor
    +
    +
    +
    inserted (Point(5.2, 7.6), (3+4j))
    +back out (Point(5.2, 7.6), (3+4j))
    +equal True
    +
    +
    +
    +
    +

    Query details

    +

apsw.ext.query_info() can provide a lot of information about a query (without running it).

    +
    import apsw.ext
    +
    +# test tables
    +connection.execute("""
    +    create table customers(
    +        id INTEGER PRIMARY KEY,
    +        name CHAR,
    +        address CHAR);
    +    create table orders(
    +        id INTEGER PRIMARY KEY,
    +        customer_id INTEGER,
    +        item MY_OWN_TYPE);
    +    create index cust_addr on customers(address);
    +""")
    +
    +query = """
    +    SELECT * FROM orders
    +    JOIN customers ON orders.customer_id=customers.id
    +    WHERE address = ?;
    +    SELECT 7;"""
    +bindings = ("123 Main Street", )
    +
    +# ask for all information available
    +qd = apsw.ext.query_info(
    +    connection,
    +    query,
    +    bindings=bindings,
    +    actions=True,  # which tables/views etc and how they are accessed
    +    expanded_sql=True,  # expands bindings into query string
    +    explain=True,  # shows low level VDBE
    +    explain_query_plan=True,  # how SQLite solves the query
    +)
    +
    +# help with formatting
    +import pprint
    +
    +
    +print("query", qd.query)
    +print("\nbindings", qd.bindings)
    +print("\nexpanded_sql", qd.expanded_sql)
    +print("\nfirst_query", qd.first_query)
    +print("\nquery_remaining", qd.query_remaining)
    +print("\nis_explain", qd.is_explain)
    +print("\nis_readonly", qd.is_readonly)
    +print("\ndescription\n", pprint.pformat(qd.description))
    +if hasattr(qd, "description_full"):
    +    print("\ndescription_full\n", pprint.pformat(qd.description_full))
    +
    +
    +print("\nquery_plan\n", pprint.pformat(qd.query_plan))
    +print("\nFirst 5 actions\n", pprint.pformat(qd.actions[:5]))
    +print("\nFirst 5 explain\n", pprint.pformat(qd.explain[:5]))
    +
    +
    +
    query
    +    SELECT * FROM orders
    +    JOIN customers ON orders.customer_id=customers.id
    +    WHERE address = ?;
    +    SELECT 7;
    +
    +bindings ('123 Main Street',)
    +
    +expanded_sql
    +    SELECT * FROM orders
    +    JOIN customers ON orders.customer_id=customers.id
    +    WHERE address = '123 Main Street';
    +
    +first_query
    +    SELECT * FROM orders
    +    JOIN customers ON orders.customer_id=customers.id
    +    WHERE address = ?;
    +
    +
    +query_remaining SELECT 7;
    +
    +is_explain 0
    +
    +is_readonly True
    +
    +description
    + (('id', 'INTEGER'),
    + ('customer_id', 'INTEGER'),
    + ('item', 'MY_OWN_TYPE'),
    + ('id', 'INTEGER'),
    + ('name', 'CHAR'),
    + ('address', 'CHAR'))
    +
    +description_full
    + (('id', 'INTEGER', 'main', 'orders', 'id'),
    + ('customer_id', 'INTEGER', 'main', 'orders', 'customer_id'),
    + ('item', 'MY_OWN_TYPE', 'main', 'orders', 'item'),
    + ('id', 'INTEGER', 'main', 'customers', 'id'),
    + ('name', 'CHAR', 'main', 'customers', 'name'),
    + ('address', 'CHAR', 'main', 'customers', 'address'))
    +
    +query_plan
    + QueryPlan(detail='QUERY PLAN',
    +          sub=[QueryPlan(detail='SCAN orders', sub=None),
    +               QueryPlan(detail='SEARCH customers USING INTEGER PRIMARY KEY '
    +                                '(rowid=?)',
    +                         sub=None)])
    +
    +First 5 actions
    + [QueryAction(action=21,
    +             action_name='SQLITE_SELECT',
    +             column_name=None,
    +             database_name=None,
    +             file_name=None,
    +             function_name=None,
    +             module_name=None,
    +             operation=None,
    +             pragma_name=None,
    +             pragma_value=None,
    +             table_name=None,
    +             trigger_name=None,
    +             trigger_or_view=None,
    +             view_name=None),
    + QueryAction(action=20,
    +             action_name='SQLITE_READ',
    +             column_name='id',
    +             database_name='main',
    +             file_name=None,
    +             function_name=None,
    +             module_name=None,
    +             operation=None,
    +             pragma_name=None,
    +             pragma_value=None,
    +             table_name='orders',
    +             trigger_name=None,
    +             trigger_or_view=None,
    +             view_name=None),
    + QueryAction(action=20,
    +             action_name='SQLITE_READ',
    +             column_name='customer_id',
    +             database_name='main',
    +             file_name=None,
    +             function_name=None,
    +             module_name=None,
    +             operation=None,
    +             pragma_name=None,
    +             pragma_value=None,
    +             table_name='orders',
    +             trigger_name=None,
    +             trigger_or_view=None,
    +             view_name=None),
    + QueryAction(action=20,
    +             action_name='SQLITE_READ',
    +             column_name='item',
    +             database_name='main',
    +             file_name=None,
    +             function_name=None,
    +             module_name=None,
    +             operation=None,
    +             pragma_name=None,
    +             pragma_value=None,
    +             table_name='orders',
    +             trigger_name=None,
    +             trigger_or_view=None,
    +             view_name=None),
    + QueryAction(action=20,
    +             action_name='SQLITE_READ',
    +             column_name='id',
    +             database_name='main',
    +             file_name=None,
    +             function_name=None,
    +             module_name=None,
    +             operation=None,
    +             pragma_name=None,
    +             pragma_value=None,
    +             table_name='customers',
    +             trigger_name=None,
    +             trigger_or_view=None,
    +             view_name=None)]
    +
    +First 5 explain
    + [VDBEInstruction(addr=0,
    +                 opcode='Init',
    +                 comment=None,
    +                 p1=0,
    +                 p2=17,
    +                 p3=0,
    +                 p4=None,
    +                 p5=0),
    + VDBEInstruction(addr=1,
    +                 opcode='OpenRead',
    +                 comment=None,
    +                 p1=0,
    +                 p2=13,
    +                 p3=0,
    +                 p4='3',
    +                 p5=0),
    + VDBEInstruction(addr=2,
    +                 opcode='OpenRead',
    +                 comment=None,
    +                 p1=1,
    +                 p2=12,
    +                 p3=0,
    +                 p4='3',
    +                 p5=0),
    + VDBEInstruction(addr=3,
    +                 opcode='Rewind',
    +                 comment=None,
    +                 p1=0,
    +                 p2=16,
    +                 p3=0,
    +                 p4=None,
    +                 p5=0),
    + VDBEInstruction(addr=4,
    +                 opcode='Column',
    +                 comment=None,
    +                 p1=0,
    +                 p2=1,
    +                 p3=1,
    +                 p4=None,
    +                 p5=0)]
    +
    +
    +
    +
    +

    Blob I/O

    +

BLOBS (binary large objects) are supported by SQLite. Note that you cannot change the size of one, but you can allocate one filled with zeroes, and then later open it and read / write the contents similar to a file, without having the entire blob in memory. Use Connection.blobopen() to open a blob.

    +
    connection.execute("create table blobby(x,y)")
    +# Add a blob we will fill in later
    +connection.execute("insert into blobby values(1, zeroblob(10000))")
    +# Or as a binding
    +connection.execute("insert into blobby values(2, ?)", (apsw.zeroblob(20000), ))
    +# Open a blob for writing.  We need to know the rowid
    +rowid = connection.execute("select ROWID from blobby where x=1").fetchall()[0][0]
    +blob = connection.blobopen("main", "blobby", "y", rowid, True)
    +blob.write(b"hello world")
    +blob.seek(2000)
    +blob.write(b"hello world, again")
    +blob.close()
    +
    +
    +
    +
    +

    Authorizer (control what SQL can do)

    +

You can allow, deny, or ignore what SQL does. Use Connection.authorizer to set an authorizer.

    +
    def auth(operation: int, p1: Optional[str], p2: Optional[str], db_name: Optional[str],
    +         trigger_or_view: Optional[str]) -> int:
         """Called when each operation is prepared.  We can return SQLITE_OK, SQLITE_DENY or
         SQLITE_IGNORE"""
         # find the operation name
    -    print(apsw.mapping_authorizer_function[operation], paramone, paramtwo, databasename, triggerorview)
    -    if operation == apsw.SQLITE_CREATE_TABLE and paramone and paramone.startswith("private"):
    +    print(apsw.mapping_authorizer_function[operation], p1, p2, db_name, trigger_or_view)
    +    if operation == apsw.SQLITE_CREATE_TABLE and p1 and p1.startswith("private"):
             return apsw.SQLITE_DENY  # not allowed to create tables whose names start with private
     
         return apsw.SQLITE_OK  # always allow
     
    -connection.setauthorizer(authorizer)
    -cursor.execute("insert into s values('foo')")
    -cursor.execute("select str from s limit 1")
    -
    -
    -
    | SQLITE_INSERT s None main None
    -| SQLITE_SELECT None None None None
    -| SQLITE_READ s str main None
    +
    +connection.authorizer = auth
    +connection.execute("insert into names values('foo')")
    +connection.execute("select name from names limit 1")
    +try:
    +    connection.execute("create table private_stuff(secret)")
    +    print("Created secret table!")
    +except Exception as e:
    +    print(e)
    +
    +# Clear authorizer
    +connection.authorizer = None
     
    -
    # Cancel authorizer
    -connection.setauthorizer(None)
    +
    SQLITE_INSERT names None main None
    +SQLITE_SELECT None None None None
    +SQLITE_READ names name main None
    +SQLITE_INSERT sqlite_master None main None
    +SQLITE_CREATE_TABLE private_stuff None main None
    +AuthError: not authorized
     
    -
    ###
    -### progress handler (SQLite 3 experimental feature)
    -###
    +
    +
    +

    Progress handler

    +

Some operations (eg joins, sorting) can take many steps to complete. Register a progress handler callback with Connection.setprogresshandler(), which lets you provide feedback and allows cancelling.

    +
    def some_numbers(how_many: int) -> Iterator[Tuple[int]]:
    +    for _ in range(how_many):
    +        yield (random.randint(0, 9999999999), )
     
    -# something to give us large numbers of random numbers
    -import random
     
    -def randomintegers(howmany: int) -> Iterator[Tuple[int]]:
    -    for i in range(howmany):
    -        yield (random.randint(0, 9999999999), )
    +# create a table with random numbers
    +with connection:
    +    connection.execute("create table numbers(x)")
    +    connection.executemany("insert into numbers values(?)", some_numbers(100))
    +
     
    -# create a table with 100 random numbers
    -cursor.execute("begin ; create table bigone(x)")
    -cursor.executemany("insert into bigone values(?)", randomintegers(100))
    -cursor.execute("commit")
    -
    -# display an ascii spinner
    -_phcount = 0
    -_phspinner = "|/-\\"
    -
    -def progresshandler() -> int:
    -    global _phcount
    -    sys.stdout.write(_phspinner[_phcount % len(_phspinner)] + chr(8))  # chr(8) is backspace
    -    sys.stdout.flush()
    -    _phcount += 1
    -    time.sleep(0.1)  # deliberate delay so we can see the spinner (SQLite is too fast otherwise!)
    -    return 0  # returning non-zero aborts
    -
    -# register progresshandler every 20 instructions
    -connection.setprogresshandler(progresshandler, 20)
    -
    -# see it in action - sorting 100 numbers to find the biggest takes a while
    -print("spinny thing -> ", end="")
    -for i in cursor.execute("select max(x) from bigone"):
    -    print("\n", i, sep="", end="")
    -    sys.stdout.flush()
    +def progress_handler() -> bool:
    +    print("progress handler called")
    +    return False  # returning True aborts
     
    +
    +# register handler every 50 vdbe instructions
    +connection.setprogresshandler(progress_handler, 50)
    +
    +# Sorting the numbers to find the biggest
    +for max_num in connection.execute("select max(x) from numbers"):
    +    print(max_num)
    +
    +# Clear handler
     connection.setprogresshandler(None)
     
    -
    ###
    -### commit hook (SQLite3 experimental feature)
    -###
    -
    -def mycommithook() -> int:
    +
    progress handler called
    +progress handler called
    +progress handler called
    +progress handler called
    +progress handler called
    +progress handler called
    +progress handler called
    +progress handler called
    +(9996980943,)
    +
    +
    +
    +
    +

    Commit hook

    +

A commit hook can allow or veto commits. Register a commit hook with Connection.setcommithook().

    +
    def my_commit_hook() -> bool:
         print("in commit hook")
         hour = time.localtime()[3]
         if hour < 8 or hour > 17:
             print("no commits out of hours")
    -        return 1  # abort commits outside of 8am through 6pm
    +        return True  # abort commits outside of 8am through 6pm
         print("commits okay at this time")
    -    return 0  # let commit go ahead
    +    return False  # let commit go ahead
     
    -connection.setcommithook(mycommithook)
    +
    +connection.setcommithook(my_commit_hook)
     try:
    -    cursor.execute("begin; create table example(x,y,z); insert into example values (3,4,5) ; commit")
    +    with connection:
    +        connection.execute("create table example(x,y,z); insert into example values (3,4,5)")
     except apsw.ConstraintError:
         print("commit was not allowed")
     
     connection.setcommithook(None)
     
    -
    | in commit hook
    -| commits okay at this time
    +
    in commit hook
    +no commits out of hours
    +commit was not allowed
     
    -
    ###
    -### update hook
    -###
    -
    -def myupdatehook(type: int, databasename: str, tablename: str, rowid: int) -> None:
    -    print("Updated: %s database %s, table %s, row %d" %
    -          (apsw.mapping_authorizer_function[type], databasename, tablename, rowid))
    -
    -connection.setupdatehook(myupdatehook)
    -cursor.execute("insert into s values(?)", ("file93", ))
    -cursor.execute("update s set str=? where str=?", ("file94", "file93"))
    -cursor.execute("delete from s where str=?", ("file94", ))
    +
    +
    +

    Update hook

    +

Update hooks let you know that data has been added, changed, or removed. For example you could use this to discard cached information. Register a hook using Connection.setupdatehook().

    +
    def my_update_hook(type: int, db_name: str, table_name: str, rowid: int) -> None:
    +    op: str = apsw.mapping_authorizer_function[type]
    +    print(f"Updated: { op } db { db_name }, table { table_name }, rowid { rowid }")
    +
    +
    +connection.setupdatehook(my_update_hook)
    +connection.execute("insert into names values(?)", ("file93", ))
    +connection.execute("update names set name=? where name=?", ("file94", "file93"))
    +connection.execute("delete from names where name=?", ("file94", ))
    +
    +# Clear the hook
     connection.setupdatehook(None)
     
    -
    | Updated: SQLITE_INSERT database main, table s, row 7
    -| Updated: SQLITE_UPDATE database main, table s, row 7
    -| Updated: SQLITE_DELETE database main, table s, row 7
    +
    Updated: SQLITE_INSERT db main, table names, rowid 7
    +Updated: SQLITE_UPDATE db main, table names, rowid 7
    +Updated: SQLITE_DELETE db main, table names, rowid 7
     
    -
    ###
    -### Blob I/O
    -###
    +
    +
    +

    Virtual tables

    +

Virtual tables let you provide data on demand as a SQLite table so you can use SQL queries against that data. Read more about virtual tables.

    +
    # This example provides information about all the files in Python's
    +# path.  The minimum amount of code needed is shown, and lets SQLite
    +# do all the heavy lifting.  A more advanced table would use indices
    +# and filters to reduce the number of rows shown to SQLite.
     
    -cursor.execute("create table blobby(x,y)")
    -# Add a blob we will fill in later
    -cursor.execute("insert into blobby values(1,zeroblob(10000))")
    -# Or as a binding
    -cursor.execute("insert into blobby values(2,?)", (apsw.zeroblob(20000), ))
    -# Open a blob for writing.  We need to know the rowid
    -rowid = next(cursor.execute("select ROWID from blobby where x=1"))[0]
    -blob = connection.blobopen("main", "blobby", "y", rowid, True)
    -blob.write(b"hello world")
    -blob.seek(2000)
    -blob.write(b"hello world, again")
    -blob.close()
    -
    -
    -
    ###
    -### Virtual tables
    -###
    +# these first columns are used by our virtual table
    +vtcolumns = ["rowid", "name", "directory"]
     
    -# This virtual table stores information about files in a set of
    -# directories so you can execute SQL queries
     
    -def getfiledata(directories):
    +def get_file_data(directories):
    +    "Returns a list of column names, and a list of all the files with their attributes"
         columns = None
         data = []
         counter = 1
    @@ -474,20 +1001,24 @@
                 counter += 1
                 st = os.stat(os.path.join(directory, f))
                 if columns is None:
    -                columns = ["rowid", "name", "directory"] + [x for x in dir(st) if x.startswith("st_")]
    +                # we add on all the fields from os.stat
    +                columns = vtcolumns + [x for x in dir(st) if x.startswith("st_")]
                 data.append([counter, f, directory] + [getattr(st, x) for x in columns[3:]])
         return columns, data
     
    +
     # This gets registered with the Connection
     class Source:
     
         def Create(self, db, modulename, dbname, tablename, *args):
    -        columns, data = getfiledata([eval(a.replace("\\", "\\\\")) for a in args])  # eval strips off layer of quotes
    +        # the eval strips off layer of quotes
    +        columns, data = get_file_data([eval(a.replace("\\", "\\\\")) for a in args])
             schema = "create table foo(" + ','.join(["'%s'" % (x, ) for x in columns[1:]]) + ")"
             return schema, Table(columns, data)
     
         Connect = Create
     
    +
     # Represents a table
     class Table:
     
    @@ -506,7 +1037,8 @@
     
         Destroy = Disconnect
     
    -# Represents a cursor
    +
    +# Represents a cursor used during SQL query processing
     class Cursor:
     
         def __init__(self, table):
    @@ -530,70 +1062,75 @@
         def Close(self):
             pass
     
    +
     # Register the module as filesource
     connection.createmodule("filesource", Source())
     
     # Arguments to module - all directories in sys.path
     sysdirs = ",".join(["'%s'" % (x, ) for x in sys.path[1:] if len(x) and os.path.isdir(x)])
    -cursor.execute("create virtual table sysfiles using filesource(" + sysdirs + ")")
    +connection.execute("create virtual table sysfiles using filesource(" + sysdirs + ")")
     
    -# Which 3 files are the biggest?
    -for size, directory, file in cursor.execute(
    +print("3 biggest files")
    +for size, directory, file in connection.execute(
             "select st_size,directory,name from sysfiles order by st_size desc limit 3"):
         print(size, file, directory)
-
-
-| 46928312 d9e93640ccdc3fbe1e95__mypyc.cpython-310-x86_64-linux-gnu.so /home/rogerb/.local/lib/python3.10/site-packages
-| 1246656 unicodedata2.cpython-310-x86_64-linux-gnu.so /usr/lib/python3/dist-packages
-| 765704 _brotli.cpython-310-x86_64-linux-gnu.so /usr/lib/python3/dist-packages
-
-
-# Which 3 files are the oldest?
-for ctime, directory, file in cursor.execute("select st_ctime,directory,name from sysfiles order by st_ctime limit 3"):
+
+print()
+print("3 oldest files")
+for ctime, directory, file in connection.execute(
+        "select st_ctime,directory,name from sysfiles order by st_ctime limit 3"):
         print(ctime, file, directory)
     
-| 1582056231.6234083 .style.yapf /space/apsw
-| 1587233567.4374056 arandr-0.1.10.egg-info /usr/lib/python3/dist-packages
-| 1604593216.8926702 pyparsing.py /usr/lib/python3/dist-packages
+
+3 biggest files
+546696 _ctypes.cpython-310d-x86_64-linux-gnu.so /usr/lib/python3.10/lib-dynload
+511328 _ssl.cpython-310d-x86_64-linux-gnu.so /usr/lib/python3.10/lib-dynload
+450008 _testcapi.cpython-310d-x86_64-linux-gnu.so /usr/lib/python3.10/lib-dynload
+
+3 oldest files
+1641597423.5541885 sitecustomize.py /usr/lib/python3.10
+1664920933.397524 _gdbm.cpython-310-x86_64-linux-gnu.so /usr/lib/python3.10/lib-dynload
+1664921015.6046188 _tkinter.cpython-310-x86_64-linux-gnu.so /usr/lib/python3.10/lib-dynload
     
-###
-### A VFS that "obfuscates" the database file contents.  The scheme
-### used is to xor all bytes with 0xa5.  This scheme honours that used
-### for MAPI and SQL Server.
-###
+

    VFS - Virtual File System

    +

VFS lets you control access to the filesystem from SQLite. APSW makes it easy to “inherit” from an existing VFS and monitor or alter data as it flows through. Read more about VFS.

+
+# This example VFS "obfuscates" the database file contents by xoring
+# all bytes with 0xa5.  URI parameters are also shown as a way you can
+# pass additional information for files.
     
    -def encryptme(data):
    +
    +def obfuscate(data):
         if not data: return data
         return bytes([x ^ 0xa5 for x in data])
     
    +
     # Inheriting from a base of "" means the default vfs
     class ObfuscatedVFS(apsw.VFS):
     
    -    def __init__(self, vfsname="obfu", basevfs=""):
    -        self.vfsname = vfsname
    -        self.basevfs = basevfs
    -        apsw.VFS.__init__(self, self.vfsname, self.basevfs)
    +    def __init__(self, vfsname="obfuscated", basevfs=""):
    +        self.vfs_name = vfsname
    +        self.base_vfs = basevfs
    +        apsw.VFS.__init__(self, self.vfs_name, self.base_vfs)
     
         # We want to return our own file implementation, but also
         # want it to inherit
    -    def xOpen(self, name, flags):
    -        # We can look at uri parameters
    +    def xOpen(self, name, flags: int):
             if isinstance(name, apsw.URIFilename):
    +            print("xOpen of", name.filename())
    +            # We can look at uri parameters
                 print("fast is", name.uri_parameter("fast"))
                 print("level is", name.uri_int("level", 3))
                 print("warp is", name.uri_boolean("warp", False))
                 print("notpresent is", name.uri_parameter("notpresent"))
-
-
-| fast is speed
-| level is 7
-| warp is True
-| notpresent is None
-
-
-
-            return ObfuscatedVFSFile(self.basevfs, name, flags)
    +        else:
    +            print("xOpen of", name)
    +        return ObfuscatedVFSFile(self.base_vfs, name, flags)
    +
     
     # The file implementation where we override xRead and xWrite to call our
     # encryption routine
    @@ -603,153 +1140,216 @@
             apsw.VFSFile.__init__(self, inheritfromvfsname, filename, flags)
     
         def xRead(self, amount, offset):
    -        return encryptme(super(ObfuscatedVFSFile, self).xRead(amount, offset))
    +        return obfuscate(super().xRead(amount, offset))
     
         def xWrite(self, data, offset):
    -        super(ObfuscatedVFSFile, self).xWrite(encryptme(data), offset)
    +        super().xWrite(obfuscate(data), offset)
    +
     
     # To register the VFS we just instantiate it
     obfuvfs = ObfuscatedVFS()
    +
     # Lets see what vfs are now available?
-print(apsw.vfsnames())
-
-
-| ['unix', 'obf', 'memdb', 'unix-excl', 'unix-dotfile', 'unix-none']
-
-
-# Make an obfuscated db, passing in some URI parameters
-obfudb = apsw.Connection("file:myobfudb?fast=speed&level=7&warp=on",
-                         flags=apsw.SQLITE_OPEN_READWRITE | apsw.SQLITE_OPEN_CREATE | apsw.SQLITE_OPEN_URI,
-                         vfs=obfuvfs.vfsname)
    +print("VFS available", apsw.vfsnames())
    +
    +# Make an obfuscated db, passing in some URI parameters
    +# default open flags
    +open_flags = apsw.SQLITE_OPEN_READWRITE | apsw.SQLITE_OPEN_CREATE
    +# add in using URI parameters
    +open_flags |= apsw.SQLITE_OPEN_URI
    +
    +obfudb = apsw.Connection("file:myobfudb?fast=speed&level=7&warp=on&another=true",
    +                         flags=open_flags,
    +                         vfs=obfuvfs.vfs_name)
    +
     # Check it works
    -obfudb.cursor().execute("create table foo(x,y); insert into foo values(1,2)")
    +obfudb.execute("create table foo(x,y); insert into foo values(1,2)")
     
     # Check it really is obfuscated on disk
-print(repr(open("myobfudb", "rb").read()[:20]))
-
-
-| b'\xf6\xf4\xe9\xcc\xd1\xc0\x85\xc3\xca\xd7\xc8\xc4\xd1\x85\x96\xa5\xb5\xa5\xa4\xa4'
-
-
-# And unobfuscating it
-print(repr(encryptme(open("myobfudb", "rb").read()[:20])))
-
-
-| b'SQLite format 3\x00\x10\x00\x01\x01'
-
-
-# Tidy up
    +print("What is on disk", repr(open("myobfudb", "rb").read()[:20]))
    +
    +# And unobfuscating it
    +print("Unobfuscated disk", repr(obfuscate(open("myobfudb", "rb").read()[:20])))
    +
    +# Tidy up
     obfudb.close()
     os.remove("myobfudb")
     
-###
-### Limits
-###
-
-# Print some limits
+
+VFS available ['unix', 'obfuscated', 'memdb', 'unix-excl', 'unix-dotfile', 'unix-none']
    +xOpen of /space/apsw/myobfudb
    +fast is speed
    +level is 7
    +warp is True
    +notpresent is None
    +xOpen of /space/apsw/myobfudb-journal
    +xOpen of /space/apsw/myobfudb-journal
    +What is on disk b'\xf6\xf4\xe9\xcc\xd1\xc0\x85\xc3\xca\xd7\xc8\xc4\xd1\x85\x96\xa5\xb5\xa5\xa4\xa4'
    +Unobfuscated disk b'SQLite format 3\x00\x10\x00\x01\x01'
    +
    +
    +
    +
    +

    Limits

    +

SQLite lets you see and update various limits via Connection.limit()

+
+# Print some limits
     for limit in ("LENGTH", "COLUMN", "ATTACHED"):
         name = "SQLITE_LIMIT_" + limit
    -    maxname = "SQLITE_MAX_" + limit  # compile time
    +    max_name = "SQLITE_MAX_" + limit  # compile time limit
         orig = connection.limit(getattr(apsw, name))
         print(name, orig)
         # To get the maximum, set to 0x7fffffff and then read value back
         connection.limit(getattr(apsw, name), 0x7fffffff)
         max = connection.limit(getattr(apsw, name))
    -    print(maxname, max)
    +    print(max_name, " ", max)
     
     # Set limit for size of a string
    -cursor.execute("create table testlimit(s)")
    -cursor.execute("insert into testlimit values(?)", ("x" * 1024, ))  # 1024 char string
    +connection.execute("create table testlimit(s)")
    +connection.execute("insert into testlimit values(?)", ("x" * 1024, ))  # 1024 char string
     connection.limit(apsw.SQLITE_LIMIT_LENGTH, 1023)  # limit is now 1023
     try:
    -    cursor.execute("insert into testlimit values(?)", ("y" * 1024, ))
    +    connection.execute("insert into testlimit values(?)", ("y" * 1024, ))
         print("string exceeding limit was inserted")
     except apsw.TooBigError:
         print("Caught toobig exception")
    +
    +# reset back to largest value
     connection.limit(apsw.SQLITE_LIMIT_LENGTH, 0x7fffffff)
     
-| SQLITE_LIMIT_LENGTH 1000000000
-| SQLITE_MAX_LENGTH 1000000000
-| SQLITE_LIMIT_COLUMN 2000
-| SQLITE_MAX_COLUMN 2000
-| SQLITE_LIMIT_ATTACHED 125
-| SQLITE_MAX_ATTACHED 125
-| Caught toobig exception
+
+SQLITE_LIMIT_LENGTH 1000000000
    +SQLITE_MAX_LENGTH   1000000000
    +SQLITE_LIMIT_COLUMN 2000
    +SQLITE_MAX_COLUMN   2000
    +SQLITE_LIMIT_ATTACHED 125
    +SQLITE_MAX_ATTACHED   125
    +Caught toobig exception
     
-###
-### Backup to memory
-###
-
-# We will copy the disk database into a memory database
-
+

    Backup an open database

    +

You can backup a database that is open. The pages are copied in batches of your choosing and allow continued use of the database. Read more.

+
+# We will copy a disk database into a memory database
     memcon = apsw.Connection(":memory:")
     
     # Copy into memory
     with memcon.backup("main", connection, "main") as backup:
    -    backup.step()  # copy whole database in one go
    -
    -# There will be no disk accesses for this query
    -for row in memcon.cursor().execute("select * from s"):
    -    pass
    +    backup.step(10)  # copy 10 pages in each batch
     
-###
-### Shell
-###
+

    Shell

    +

APSW includes a shell like the one in SQLite, and is also extensible from Python.

+
+import apsw.shell
     
    -# Here we use the shell to do a csv export providing the existing db
    -# connection
    +# Here we use the shell to do a csv export and then dump part of the
    +# database
     
     # Export to a StringIO
     import io
     
     output = io.StringIO()
    -shell = apsw.Shell(stdout=output, db=connection)
    +shell = apsw.shell.Shell(stdout=output, db=connection)
    +
     # How to execute a dot command
     shell.process_command(".mode csv")
     shell.process_command(".headers on")
    +
     # How to execute SQL
    -shell.process_sql(
    -    "create table csvtest(col1,col2); insert into csvtest values(3,4); insert into csvtest values('a b', NULL)")
    -# Let the shell figure out SQL vs dot command
    +shell.process_sql("""
    +    create table csvtest(column1, column2 INTEGER);
    +    create index faster on csvtest(column1);
    +    insert into csvtest values(3, 4);
    +    insert into csvtest values('a b', NULL);
    +""")
    +
    +# Or let the shell figure out SQL vs dot command
     shell.process_complete_line("select * from csvtest")
     
    -# Verify output
    +# see the result
    +print(output.getvalue())
    +
    +# reset output
    +output.seek(0)
    +
    +# make a dump of the same table
    +shell.process_command(".dump csvtest%")
    +
    +# see the result
    +print("\nDump output\n")
     print(output.getvalue())
     
-| col1,col2
-| 3,4
-| a b,
-|
+
+column1,column2
+3,4
+a b,
+
+
+Dump output
+
    +-- SQLite dump (by APSW 3.40.0.0)
    +-- SQLite version 3.40.0
    +-- Date: Sun Nov 27 07:17:16 2022
    +-- Tables like: csvtest%
    +-- Database: /space/apsw/dbfile
    +-- User: rogerb @ clamps
    +
    +-- The values of various per-database settings
    +PRAGMA page_size=4096;
    +-- PRAGMA encoding='UTF-8';
    +-- PRAGMA auto_vacuum=NONE;
    +-- PRAGMA max_page_count=1073741823;
    +
    +BEGIN TRANSACTION;
    +
    +-- Table  csvtest
    +DROP TABLE IF EXISTS csvtest;
    +CREATE TABLE csvtest(column1, column2 INTEGER);
    +INSERT INTO csvtest VALUES(3,4);
    +INSERT INTO csvtest VALUES('a b',NULL);
    +-- Triggers and indices on  csvtest
    +CREATE INDEX faster on csvtest(column1);
    +
    +COMMIT TRANSACTION;
     
-###
-### Statistics
-###
-
-print("SQLite memory usage current %d max %d" % apsw.status(apsw.SQLITE_STATUS_MEMORY_USED))
    +
    +
    +

    Statistics

    +

SQLite provides statistics via status()

+
+current_usage, max_usage = apsw.status(apsw.SQLITE_STATUS_MEMORY_USED)
    +print(f"SQLite memory usage { current_usage } max { max_usage }")
     
-| SQLite memory usage current 315008 max 326872
+
+SQLite memory usage 298832 max 345560
     
-###
-### Cleanup
-###
-
-# We can close connections manually (useful if you want to catch exceptions)
-# but you don't have to
-connection.close(True)  # force it since we want to exit
    +
    +
    +

    Cleanup

    +

As a general rule you do not need to do any cleanup. Standard Python garbage collection will take care of everything. Even if the process crashes with a connection in the middle of a transaction, the next time SQLite opens that database it will automatically rollback the partial data.

+
+# You can close connections manually (useful if you want to catch exceptions)
+connection.close()
+# You can call close multiple times, and also indicate to ignore exceptions
+connection.close(True)
     
    -# Delete database - we don't need it any more
    +# Deleting the database file. Note that there can be additional files
    +# with suffixes like -wal, -shm, and -journal.
     os.remove("dbfile")
     
    +
    @@ -758,12 +1358,54 @@
    @@ -800,15 +1442,15 @@
@@ -54,9 +56,9 @@
Execution and tracing

Execution model

    This section only matters if you give multiple SQL statements in one go to Cursor.execute. (Statements are separated by semi-colons.)

SQLite does execution in two steps. First a statement is prepared,
@@ -66,10 +68,8 @@
or the statement is complete.

 The Cursor.execute() method automatically does the preparing and
 starts execution. If none of the statements return rows then execution
-will go to the end. If a row is returned then you need to call
-Cursor.next() to get the row values or use the cursor as an
-iterator. Execution will resume as necessary to satisfy
-next() calls.
+will go to the end. If a row is returned then you use the cursor as an
+iterator. Execution will resume as necessary to return each result row.

However this means that if you don’t read the rows returned then the
rest of your statements won’t be executed. APSW will detect unexecuted
previous statements and generate an exception. For
@@ -87,7 +87,7 @@
exception raised.
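
As a rough sketch of the behaviour described above (not from the
original document; assumes an in-memory database):

import apsw

connection = apsw.Connection(":memory:")
# Two statements in one call, separated by a semi-colon.  The second
# statement only executes as the returned rows are iterated.
for (value,) in connection.execute("create table t(x); select 42 as x"):
    print(value)  # prints 42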

Multi-threading and re-entrancy

APSW lets you use SQLite in multi-threaded programs and will let other
threads execute while SQLite is working. (Technically the GIL is
released when sqlite3_prepare_v2,
@@ -99,7 +99,7 @@

 Note that you cannot use the same cursor object in multiple threads
 concurrently to execute statements. APSW will detect this and throw an
 exception. It is safe to use the object serially (eg calling
-Cursor.execute() in one thread and Cursor.next() in
+Cursor.execute() in one thread and iterating in
 another). You also can’t do things like try to close() a
 Connection concurrently in two threads.

If you have multiple threads and/or multiple programs accessing the
@@ -118,20 +118,20 @@
your trace or user defined functions.

64 bit hosts

 APSW is tested and works correctly on 32 and 64 bit hosts. Unfortunately
 SQLite is limited to 32 bit quantities for strings, blobs, number of
 columns etc even when compiled for 64 bit. Consequently you will get a
 TooBig exception from APSW which checks if strings/buffers longer than
 1GB or 2GB (depends on internal storage)
-are used. See SQLite ticket #2125 and SQLite ticket #3246 for more details.
+are used. cvstrac 2125 and 3246 had more details.

Statement Cache

 Each Connection maintains a cache mapping SQL queries to a prepared
 statement to avoid the overhead of repreparing queries that are executed
-multiple times. This is a classic tradeoff using more memory to
+multiple times. This is a classic trade off using more memory to
 reduce CPU consumption.

By default there are up to 100 entries in the cache. Once the cache is
full, the least recently used item is discarded to make space for
@@ -139,15 +139,14 @@

    You should pick a larger cache size if you have more than 100 unique queries that you run. For example if you have 101 different queries you run in order then the cache will not help.

-You can also specify zero which will disable the
-statement cache.
-
-If you are using authorizers then
-you should disable the statement cache. This is because the
-authorizer callback is only called while statements are being
-prepared.
+If you are using authorizers then be
+aware the authorizer callback is only called while statements are being
+prepared. You can specify zero which will
+disable the statement cache completely, or use the can_cache = False
+flag to execute/executemany.
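
As a sketch, using the statementcachesize parameter and the can_cache
flag referred to above:

import apsw

# a connection with no statement cache at all
con = apsw.Connection(":memory:", statementcachesize=0)

# keep the cache, but bypass it for one statement so an authorizer
# would see it being prepared every time
con2 = apsw.Connection(":memory:")
con2.cursor().execute("select 1", can_cache=False)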

Tracing

You can install tracers on cursors or connections as an easy way of
seeing exactly what gets executed and what is returned. The tracers can
also abort
@@ -160,13 +159,13 @@
your tracer was called from. If you would like to make more queries in
the tracer then do them from a new cursor object. For example:

    def exectracer(cursor, sql, bindings):
    -  cursor.getconnection().cursor("insert into log values(?,?)", (sql,str(bindings)))
    +  cursor.connection.cursor().execute("insert into log values(?,?)", (sql,str(bindings)))
       return True
     
Execution Tracer

The execution tracer is called after an SQL statement has been prepared.
(ie syntax errors will have caused an exception during preparation so
you won’t see them with a tracer). It is called with
@@ -177,17 +176,17 @@

sql

The SQL text being executed

bindings

The bindings being used. This may be None, a dictionary or a tuple.

-If the tracer return value evaluates to False/None then execution is
+If the tracer return value is False then execution is
 aborted with an ExecTraceAbort exception. See the example.

-Execution tracers can be installed on a specific cursor by calling
-Cursor.setexectrace() or for all cursors by calling
-Connection.setexectrace(), with the cursor tracer taking
+Execution tracers can be installed on a specific cursor by setting
+Cursor.exectrace or for all cursors by setting
+Connection.exectrace, with the cursor tracer taking
 priority.

If you use the Connection with statement and have a Connection
execution tracer then your callback will also be
@@ -196,7 +195,7 @@
since there is no cursor involved.
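
As a sketch, installing the exectracer function from the snippet above
(cursor and connection are assumed to already exist):

cursor.exectrace = exectracer      # trace just this cursor
connection.exectrace = exectracer  # default for all cursors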

Row Tracer

    The row tracer is called before each row is returned. It is called with two arguments.

    @@ -210,29 +209,21 @@

    Whatever you return from the tracer is what is actually returned to the caller of execute(). If you return None then the whole row is skipped. See the example.

-Row tracers can be installed on a specific cursor by calling
-Cursor.setrowtrace() or for all cursors by calling
-Connection.setrowtrace(), with the cursor tracer taking
+Row tracers can be installed on a specific cursor by setting
+Cursor.rowtrace or for all cursors by setting
+Connection.rowtrace, with the cursor tracer taking
 priority.
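
A minimal sketch of a row tracer (assumed names, not from the original
document):

def rowtracer(cursor, row):
    # what is returned here is what the caller of execute() receives;
    # returning None would skip the row entirely
    return tuple(str(value).upper() for value in row)

cursor.rowtrace = rowtracer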

APSW Trace

-APSW includes a tracing script as part of the source
-distribution named apswtrace.py, or you
-can get a copy directly from source control (choose “Raw File”). This script lets you
-easily trace SQL execution as well as providing a summary report
-without modifying your code. If it is installed anywhere on your
-PYTHONPATH then you can invoke it with -m:
-
-$ python -m apswtrace [apswtrace options] yourscript.py [your options]
-
-You can also invoke it directly:
-
-$ python /path/to/apswtrace.py [apswtrace options] yourscript.py [your options]
+APSW includes a tracer that lets you easily trace SQL execution as
+well as providing a summary report without modifying your code.

+
+$ python3 -m apsw.trace [apswtrace options] yourscript.py [your options]
+

    All output is UTF-8 encoded. The following options are available:

-$ python apswtrace.py --help
+$ python3 -m apsw.trace --help
     Usage: apswtrace.py [options] pythonscript.py [pythonscriptoptions]
     
     This script runs a Python program that uses APSW and reports on SQL queries
    @@ -260,8 +251,8 @@
                             [summary,popular,aggregate,individual]
     
This is sample output with the following options: --sql,
--rows, --timestamps, --thread

    1e0e5a0 0.152 7fccea8456e0 OPEN: ":memory:" unix READWRITE|CREATE
     1f72ac0 0.161 7fccea8456e0 OPEN: "testdb" unix READWRITE|CREATE
     1f6b8d0 0.162 7fccea8456e0 CURSORFROM: 1f72ac0 DB: "testdb"
    @@ -298,7 +289,7 @@
     
    OPEN: “dbname” vfs open_flags

A Connection has been opened. The dbname is the filename exactly as
given in the call to Connection. vfs is the name of the VFS used to
open the database. open_flags is the set of flags supplied with the
leading SQLITE_OPEN prefix omitted.

    CURSORFROM: connectionid DB: “dbname”

A cursor has been allocated. The id at the beginning of this row
@@ -414,8 +405,9 @@

    @@ -473,15 +470,15 @@
    @@ -54,13 +56,13 @@
Extensions

    SQLite includes a number of extensions providing additional functionality. All extensions are disabled by default and you need to take steps to have them available at compilation time, to enable them and then to use them.

FTS3/4/5

FTS3 is the third version of the full text search extension. It makes
it easy to find words in multi-word text fields. You must enable the
extension via setup.py build flags before it will work. There are no
additional
@@ -72,23 +74,23 @@
compatibility.
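
If FTS5 was enabled at build time, usage is plain SQL - a sketch,
assuming a connection named connection:

connection.execute("""
    create virtual table if not exists docs using fts5(content);
    insert into docs values('the quick brown fox');
""")
for (content,) in connection.execute(
        "select content from docs where docs match 'quick'"):
    print(content)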

ICU

The ICU extension provides an International Components for Unicode interface, in particular enabling you to do sorting and regular expressions in a locale aware way. The documentation shows how to use it.

JSON1

    Provides functions for managing JSON data stored in SQLite.
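
For example, json_extract is one of the provided SQL functions (a
sketch, assuming a connection named connection):

for (name,) in connection.execute(
        "select json_extract(?, '$.name')", ('{"name": "apsw"}',)):
    print(name)  # prints apsw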

RBU

    Provides resumable bulk update intended for use with large SQLite databases on low power devices at the edge of a network.

RTree

The RTree extension provides a spatial table - see the documentation.
You must enable the extension via setup.py build flags before it will
work. There are no additional APIs and the documented SQL
@@ -103,8 +105,9 @@
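
A sketch of the documented SQL, assuming the extension was compiled in
and a connection named connection:

connection.execute("""
    create virtual table if not exists demo_index using rtree(
        id,          -- integer primary key
        minX, maxX,  -- first dimension
        minY, maxY); -- second dimension
    insert into demo_index values(1, -80.7, -80.6, 35.0, 35.1);
""")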

APSW documentation

-APSW 3.39.2.0 released 31 August 2022
+APSW 3.40.0.0 released 27 November 2022

Use with SQLite 3.39 or later, CPython 3.6 and later:

Version 3.37.0-r1 from January 2022 supports all CPython versions back
to 2.3. The
@@ -62,7 +64,7 @@

APSW provides an SQLite 3 wrapper that provides the thinnest layer over
the SQLite database library possible. Everything you can do from the
SQLite C API, you can do from Python. Although APSW looks vaguely
similar to the PEP 249 (DBAPI), it is not compliant with that API
because instead it works the way SQLite 3 does. (Read more about the
differences).

In general you should use Python’s builtin sqlite3 module. Use APSW
when
@@ -70,8 +72,11 @@
control what versions are used, or want to control SQLite’s
configuration (primarily done at compile time) or extensions (like JSON
or FTS)

 APSW is hosted at https://github.com/rogerbinns/apsw and can be
-downloaded from PyPI
-
-Contents:
+downloaded from PyPI
-Indices and tables
@@ -339,15 +380,11 @@
@@ -381,15 +418,15 @@
a
 apsw Python access to SQLite database library
+apsw.ext
+apsw.shell
@@ -85,7 +94,7 @@
      @@ -99,15 +108,15 @@
    @@ -53,8 +55,8 @@
-pysqlite differences
+sqlite3 module differences

    The sqlite3 standard module and APSW approached the problem of providing access to SQLite from Python from fundamentally different directions.

@@ -71,7 +73,7 @@
needs are simple, and you don’t want to use SQLite features.

What APSW does better

    APSW has the following enhancements/differences over the sqlite3 module:

@@ -86,11 +88,9 @@
used in the same thread. You can disable its checking, but unless you
are very careful with your own mutexes you will have a crash or a
deadlock.

-• APSW is a single file for the extension, apsw.pyd on Windows
-and apsw.so on Unix/Mac (Note PEP 3149). There are no
-other files needed and the build instructions show
-you how to include SQLite statically in this file. You can put this
-file anywhere your Python session can reach.
+• APSW build instructions show you how to include
+SQLite statically in the extension, avoiding a dependency on system
+SQLite.

• Nothing happens behind your back. By default sqlite3 tries to manage
transactions (for DBAPI compliance) by parsing your SQL for you, but
you can turn it off. This can result in very unexpected
@@ -123,7 +123,7 @@

• Cursor.executemany() also works with statements that return data
such as selects, and you can have multiple statements (a sketch
follows this list).
-sqlite3’s executescript() method doesn’t allow any form of
+sqlite3’s executescript method doesn’t allow any form of
data being returned (it silently ignores any returned data).

• sqlite3 swallows exceptions in your callbacks making it far harder to
debug problems. That also prevents you from raising exceptions in
@@ -176,12 +176,9 @@

    • APSW has significantly enhanced debuggability. More details are available than just what is printed out when exceptions happen like above. See augmented stack traces

-• APSW has execution and row tracers. sqlite3 has no
-equivalent to execution tracers and does
-have data adaptors which aren’t the same thing as a row tracer (for
-example you can’t skip rows or add a new column to
-each row returned). sqlite3 does have a row factory
-but you can easily emulate that with the row tracer and
-Cursor.getdescription().
+• APSW has better execution and row tracing.
+Various interesting and useful bits of functionality provide accessing
+rows by column name, type conversion,
+getting query details etc.

• APSW has an apswtrace utility script that traces execution and
results in your code without having to modify it in any way. It also
outputs summary reports making it easy to see what
@@ -204,16 +201,10 @@
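
A sketch of the executemany behaviour mentioned earlier in this list
(assumed table and data; uses SQLite’s returning clause):

import apsw

con = apsw.Connection(":memory:")
con.execute("create table t(x)")
# the statement runs once per bindings sequence, and any rows it
# produces come back through the one cursor
for row in con.cursor().executemany(
        "insert into t values(?) returning x", [(1,), (2,), (3,)]):
    print(row)  # (1,) then (2,) then (3,)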

What sqlite3 does better

-• sqlite3 has an adaptor system
-that lets you pretend SQLite stores and returns more types than it
-really supports. Note that the database won’t be useful in a
-non-sqlite3 context (eg PHP code looking at the same database isn’t
-going to recognise your Point class). You can implement something
-similar in APSW by intercepting Cursor.execute() calls that
-suitably mangles the bindings going to SQLite and does something
-similar to the rows the iterator returns.
+• sqlite3 is part of the standard library, and is widely supported by
+libraries that abstract away the database layer

    @@ -225,21 +216,27 @@
    @@ -276,15 +273,15 @@
  • previous |
  • - - + + @@ -100,15 +102,15 @@
  • modules |
  • - +
    @@ -614,20 +656,20 @@ modules |
  • - next |
  • previous |
  • - +
    ", "
    " ], + col: [ 2, "", "
    " ], + tr: [ 2, "", "
    " ], + td: [ 3, "", "
    " ], + + _default: [ 0, "", "" ] +}; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +// Support: IE <=9 only +if ( !support.option ) { + wrapMap.optgroup = wrapMap.option = [ 1, "" ]; +} + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, attached, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( toType( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + attached = isAttached( elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( attached ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +var rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 - 11+ +// focus() and blur() are asynchronous, except when they are no-op. 
+// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = Object.create( null ); + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
+ jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." ) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. 
+ var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... ) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. +function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. + saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + + // Support: Chrome 86+ + // In Chrome, if an element having a focusout handler is blurred by + // clicking outside of it, it invokes the handler synchronously. 
If + // that handler calls `.remove()` on the element, the data is cleared, + // leaving `result` undefined. We need to guard against this. + return result && result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + which: true +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + // Suppress native focus or blur as it's already being fired + // in leverageNative. + _default: function() { + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). 
+jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. + // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + // Support: IE <=10 - 11, Edge 12 - 13 only + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { + + return jQuery( elem ).children( "tbody" )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { + elem.type = elem.type.slice( 5 ); + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc. + if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.get( src ); + events = pdataOld.events; + + if ( events ) { + dataPriv.remove( dest, "handle events" ); + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. 
Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. + if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = flat( args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + }, doc ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
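+		// Illustrative contrast (hypothetical element): jQuery( "#box" ).remove()
+		// reaches this helper with keepData unset, so cleanData() strips events
+		// and data below, while jQuery( "#box" ).detach() passes keepData = true
+		// and preserves both for later reinsertion.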
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html; + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
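+			// Illustrative (hypothetical selector): jQuery( "<p/>" ).appendTo( ".slot" )
+			// must fill every matched slot, so each target except the last gets a
+			// deep clone; only the final target reuses the original elements.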
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableTrDimensionsVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + 
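+	// Illustrative consequence (hypothetical element): when clearCloneStyle is
+	// false (old IE), a sequence such as
+	//
+	//     jQuery( "#src" ).clone().css( "backgroundColor", "" );
+	//
+	// could also clear the background of #src itself; jQuery.style() compensates
+	// by writing "inherit" for empty background values on those browsers.
+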
support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! + // + // Support: Firefox 70+ + // Only Firefox includes border widths + // in computed dimensions. (gh-4529) + reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; + tr.style.cssText = "border:1px solid"; + + // Support: Chrome 86+ + // Height set through cssText does not get applied. + // Computed height then comes back as 0. + tr.style.height = "1px"; + trChild.style.height = "9px"; + + // Support: Android 8 Chrome 86+ + // In our bodyBackground.html iframe, + // display for all div elements is set to "inline", + // which causes a problem only in Android 8 Chrome 86. + // Ensuring the div is display: block + // gets around this issue. + trChild.style.display = "block"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + + parseInt( trStyle.borderTopWidth, 10 ) + + parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. 
+ // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( _elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? 
"border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. + if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. + // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
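+	// Illustrative (hypothetical custom property): a call such as
+	//
+	//     jQuery( elem ).css( "--brand-color", "#c00" );
+	//
+	// lands here with isCustomProp true, skips the finalPropName() mapping, and
+	// is ultimately written through style.setProperty() below.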
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( _i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. 
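+				// Illustrative (hypothetical element): jQuery( "#hidden" ).width()
+				// on a display:none node takes the swap() branch below, briefly
+				// applying cssShow (absolute/hidden/block) so a real dimension can
+				// be measured without the element flashing into view.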
+ ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. + scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? + boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? + hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. 
+ opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
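+	// Illustrative normalizations (durations resolved via jQuery.fx.speeds):
+	//     jQuery.speed( 400 )        -> { duration: 400, queue: "fx", ... }
+	//     jQuery.speed( "slow", cb ) -> { duration: 600, complete: cb, ... }
+	// while a settings object like the one below is simply copied.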
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
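+	// Illustrative (hypothetical element): jQuery( "#msg" ).fadeIn().delay( "slow" ).fadeOut()
+	// resolves "slow" against jQuery.fx.speeds (600ms) here before scheduling
+	// the timeout on the default "fx" queue.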
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
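+		// Illustrative: jQuery( "<input>" ).attr( "maxlength" ) returns undefined
+		// for the absent attribute (normalized from null just below), rather than
+		// the element's default property value that .prop() would report.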
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? 
+ event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = { guid: Date.now() }; + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml, parserErrorElem; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. + try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) {} + + parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; + if ( !xml || parserErrorElem ) { + jQuery.error( "Invalid XML: " + ( + parserErrorElem ? 
+ jQuery.map( parserErrorElem.childNodes, function( el ) { + return el.textContent; + } ).join( "\n" ) : + data + ) ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ).filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ).map( function( _i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + +originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } 
); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev 
can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Use a noop converter for missing script but not if jsonp + if ( !isSuccess && + jQuery.inArray( "script", s.dataTypes ) > -1 && + jQuery.inArray( "json", s.dataTypes ) < 0 ) { + s.converters[ "text script" ] = function() {}; + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( _i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + + +jQuery._evalUrl = function( url, options, doc ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options, doc ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? + { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + - + +
@@ -54,7 +56,7 @@
-Types
+Types
Read about SQLite 3 types. APSW always maintains the correct type for values, and never converts them to something else. Note however that SQLite may convert types based on column
@@ -62,7 +64,7 @@
requires that all values supplied are one of the corresponding Python/SQLite types (or a subclass).

-Mapping
+Mapping

    • None in Python is NULL in SQLite

• Python int is INTEGER in SQLite. The value represented must fit
@@ -76,7 +78,7 @@
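As a hedged illustration of the mapping above (not from the original docs; the table name and values are made up, and an in-memory database plus the Connection.execute convenience method are assumed)::

    import apsw

    con = apsw.Connection(":memory:")
    con.execute("CREATE TABLE t(v)")
    # each Python value is stored using the corresponding SQLite type
    for value in (None, 42, 3.14, "hello", b"\x00\x01"):
        con.execute("INSERT INTO t VALUES(?)", (value,))
    # typeof() reports the storage class SQLite actually used
    for v, t in con.execute("SELECT v, typeof(v) FROM t"):
        print(repr(v), t)   # null, integer, real, text, blob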

-Unicode
+Unicode

All SQLite strings are Unicode. The actual binary representations can be UTF8, or UTF16 in either byte order. APSW uses the UTF8 interface to SQLite which results in the binary string representation in your
@@ -124,8 +126,9 @@

@@ -54,8 +56,8 @@
-Virtual File System (VFS)
+Virtual File System (VFS)
-SQLite 3.6 has new VFS functionality which defines the interface
+SQLite 3.6 added VFS functionality which defines the interface
 between the SQLite core and the underlying operating system. The majority of the functionality deals with files. APSW exposes this functionality letting you provide your own routines. You can also
@@ -84,12 +86,12 @@
 routines are treated as Unicode.

-Exceptions and errors
+Exceptions and errors
 To return an error from any routine you should raise an exception. The exception will be translated into the appropriate SQLite error code for SQLite. To return a specific SQLite error code use exceptionfor(). If the exception does not map to any specific
-error code then SQLITE_ERROR which corresponds to
+error code then SQLITE_ERROR which corresponds to
 SQLError is returned to SQLite.
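A minimal sketch of the exceptionfor() pattern (the VFS name "myvfs" and the ".blocked" check are made up for illustration)::

    import apsw

    class MyVFS(apsw.VFS):
        def __init__(self):
            # base="" inherits behaviour from the default VFS
            super().__init__("myvfs", base="")

        def xAccess(self, pathname: str, flags: int) -> bool:
            if pathname.endswith(".blocked"):
                # exceptionfor turns a result code into the matching
                # exception (here apsw.CantOpenError), which APSW
                # translates back to SQLITE_CANTOPEN for SQLite
                raise apsw.exceptionfor(apsw.SQLITE_CANTOPEN)
            return super().xAccess(pathname, flags)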

The SQLite code that deals with VFS errors behaves in varying ways. Some routines have no way to return an error (eg xDlOpen just returns zero/NULL on
@@ -157,7 +159,7 @@
ZeroDivision exception

-SQLITE_ERROR (closest
+SQLITE_ERROR (closest
 matching SQLite error code) is returned to SQLite by APSW

    @@ -173,7 +175,7 @@

 SQLite returns
-SQLITE_FULL to APSW
+SQLITE_FULL to APSW

    APSW returns apsw.FullError

    @@ -184,10 +186,10 @@
-VFS class
+VFS class

    -class VFS(name: str, base: Optional[str] = None, makedefault: bool = False, maxpathname: int = 1024)
    +class VFS(name: str, base: Optional[str] = None, makedefault: bool = False, maxpathname: int = 1024)

    Provides operating system access. You can get an overview in the SQLite documentation. To create a VFS your Python class must inherit from VFS.
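A minimal sketch of such a subclass (the name "passthruvfs" is made up; it simply delegates everything to the default VFS via base="")::

    import apsw

    class PassthroughVFS(apsw.VFS):
        def __init__(self):
            # registers this VFS under its own name, inheriting all
            # behaviour from the default VFS
            super().__init__("passthruvfs", base="")

    vfs = PassthroughVFS()
    # request the VFS by name when opening a database
    con = apsw.Connection("demo.db", vfs="passthruvfs")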

    @@ -209,7 +211,7 @@
 Raises
-ValueError – If base is not None and the named vfs is not
+ValueError – If base is not None and the named vfs is not
 currently registered.

    @@ -224,7 +226,7 @@
    -VFS.excepthook(etype: type[BaseException], evalue: BaseException, etraceback: Optional[TracebackType]) Any
    +VFS.excepthook(etype: type[BaseException], evalue: BaseException, etraceback: Optional[types.TracebackType]) Any

Called when there has been an exception in a VFS routine. The default implementation passes args to sys.excepthook and if that fails then PyErr_Display. The three arguments correspond to
@@ -233,7 +235,7 @@

    -VFS.unregister() None
    +VFS.unregister() None

Unregisters the VFS making it unavailable to future database opens. You do not need to call this as the VFS is automatically unregistered when the VFS has no more references or open
@@ -245,7 +247,7 @@

    -VFS.xAccess(pathname: str, flags: int) bool
    +VFS.xAccess(pathname: str, flags: int) bool

    SQLite wants to check access permissions. Return True or False accordingly.
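A sketch of one way to implement it with the os module (hedged: the mapping of the flag values onto checks is an assumption for illustration)::

    import os
    import apsw

    def xAccess(self, pathname: str, flags: int) -> bool:
        if flags == apsw.SQLITE_ACCESS_EXISTS:
            return os.path.exists(pathname)
        # readable and writable for the other access checks
        return os.access(pathname, os.R_OK | os.W_OK)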

    @@ -260,7 +262,7 @@
    -VFS.xCurrentTime() float
    +VFS.xCurrentTime() float

    Return the Julian Day Number as a floating point number where the integer portion is the day and the fractional part is the time. Do not adjust for timezone (ie use UTC).
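Since the Unix epoch falls on Julian Day 2440587.5, a plausible sketch is just an offset and scale of time.time()::

    import time

    def xCurrentTime(self) -> float:
        # seconds since 1970-01-01 UTC converted to days, plus the
        # Julian Day Number of the Unix epoch
        return time.time() / 86400.0 + 2440587.5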

    @@ -268,10 +270,10 @@
    -VFS.xDelete(filename: str, syncdir: bool) None
    +VFS.xDelete(filename: str, syncdir: bool) None

 Delete the named file. If the file is missing then raise an IOError exception with extendedresult
-SQLITE_IOERR_DELETE_NOENT
+SQLITE_IOERR_DELETE_NOENT
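A minimal sketch along those lines (ignoring the syncdir hint)::

    import os
    import apsw

    def xDelete(self, filename: str, syncdir: bool) -> None:
        try:
            os.remove(filename)
        except FileNotFoundError:
            # report the missing file with the documented extended code
            raise apsw.exceptionfor(apsw.SQLITE_IOERR_DELETE_NOENT)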

    Parameters
      @@ -287,7 +289,7 @@
      -VFS.xDlClose(handle: int) None
      +VFS.xDlClose(handle: int) None

      Close and unload the library corresponding to the handle you returned from xDlOpen(). You can use ctypes to do this:

      @@ -301,7 +303,7 @@
      -VFS.xDlError() str
      +VFS.xDlError() str

      Return an error string describing the last error of xDlOpen() or xDlSym() (ie they returned zero/NULL). If you do not supply this routine then SQLite provides @@ -314,7 +316,7 @@

      -VFS.xDlOpen(filename: str) int
      +VFS.xDlOpen(filename: str) int

      Load the shared library. You should return a number which will be treated as a void pointer at the C level. On error you should return 0 (NULL). The number is passed as is to @@ -329,7 +331,7 @@

      -VFS.xDlSym(handle: int, symbol: str) int
      +VFS.xDlSym(handle: int, symbol: str) int

      Returns the address of the named symbol which will be called by SQLite. On error you should return 0 (NULL). You can use ctypes:

      def xDlSym(ptr, name):
      @@ -349,13 +351,13 @@
       
       
      -VFS.xFullPathname(name: str) str
      +VFS.xFullPathname(name: str) str

      Return the absolute pathname for name. You can use os.path.abspath to do this.

      -VFS.xGetLastError() Tuple[int, str]
      +VFS.xGetLastError() Tuple[int, str]

      This method is to return an integer error code and (optional) text describing the last error that happened in this thread.

      @@ -367,14 +369,14 @@
      -VFS.xGetSystemCall(name: str) Optional[int]
      +VFS.xGetSystemCall(name: str) Optional[int]

      Returns a pointer for the current method implementing the named system call. Return None if the call does not exist.

      -VFS.xNextSystemCall(name: Optional[str]) Optional[str]
      +VFS.xNextSystemCall(name: Optional[str]) Optional[str]

      This method is repeatedly called to iterate over all of the system calls in the vfs. When called with None you should return the name of the first system call. In subsequent calls return the @@ -391,22 +393,22 @@

      -VFS.xOpen(name: Optional[Union[str, URIFilename]], flags: List[int, int]) VFSFile
      +VFS.xOpen(name: Optional[Union[str, URIFilename]], flags: List[int, int]) VFSFile

      This method should return a new file object based on name. You can return a VFSFile from a completely different VFS.

      Parameters
        -
      • name – File to open. Note that name may be None in which +

      • name – File to open. Note that name may be None in which case you should open a temporary file with a name of your choosing. May be an instance of URIFilename.

      • flags – A list of two integers [inputflags, outputflags]. Each integer is one or more of the open flags binary orred together. The inputflags tells you what SQLite wants. For -example SQLITE_OPEN_DELETEONCLOSE means the file should +example SQLITE_OPEN_DELETEONCLOSE means the file should be automatically deleted when closed. The outputflags describes how you actually did open the file. For example if you -opened it read only then SQLITE_OPEN_READONLY should be +opened it read only then SQLITE_OPEN_READONLY should be set.

      @@ -415,7 +417,7 @@
      -VFS.xRandomness(numbytes: int) bytes
      +VFS.xRandomness(numbytes: int) bytes

This method is called once when SQLite needs to seed the random number generator. It is called on the default VFS only. It is not called again, even across apsw.shutdown() calls. You can
@@ -425,7 +427,7 @@

      -VFS.xSetSystemCall(name: Optional[str], pointer: int) bool
      +VFS.xSetSystemCall(name: Optional[str], pointer: int) bool

      Change a system call used by the VFS. This is useful for testing and some other scenarios such as sandboxing.

      @@ -453,7 +455,7 @@
      -VFS.xSleep(microseconds: int) int
      +VFS.xSleep(microseconds: int) int

      Pause execution of the thread for at least the specified number of microseconds (millionths of a second). This routine is typically called from the busy handler.
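A sketch using time.sleep (sleeping slightly longer than asked is fine since the contract is "at least" the requested duration)::

    import time

    def xSleep(self, microseconds: int) -> int:
        time.sleep(microseconds / 1_000_000)
        # report how long we were asked to sleep
        return microseconds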

      @@ -469,10 +471,10 @@
-VFSFile class
+VFSFile class

    -class VFSFile(vfs: str, filename: Union[str, URIFilename], flags: List[int])
    +class VFSFile(vfs: str, filename: Union[str, URIFilename], flags: List[int])

    Wraps access to a file. You only need to derive from this class if you want the file object returned from VFS.xOpen() to inherit from an existing VFS implementation.
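For example (a sketch; the class name and the logging are illustrative), a file that inherits the default behaviour but traces reads:

    import apsw

    class TracingVFSFile(apsw.VFSFile):
        def __init__(self, vfs, filename, flags):
            # "" makes the default VFS provide the underlying behaviour
            super().__init__("", filename, flags)

        def xRead(self, amount, offset):
            data = super().xRead(amount, offset)
            print(f"read {len(data)} bytes at offset {offset}")
            return data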

    @@ -491,7 +493,7 @@
    Raises
ValueError – If the named VFS is not registered.

    @@ -508,7 +510,7 @@
    -VFSFile.excepthook(etype: type[BaseException], evalue: BaseException, etraceback: Optional[TracebackType]) None
    +VFSFile.excepthook(etype: type[BaseException], evalue: BaseException, etraceback: Optional[types.TracebackType]) None

Called when there has been an exception in a VFSFile routine. The default implementation calls sys.excepthook and if that fails then PyErr_Display. The three arguments
@@ -527,14 +529,14 @@

    -VFSFile.xCheckReservedLock() bool
    +VFSFile.xCheckReservedLock() bool

    Returns True if any database connection (in this or another process) has a lock other than SQLITE_LOCK_NONE or SQLITE_LOCK_SHARED.

    -VFSFile.xClose() None
    +VFSFile.xClose() None

Close the file. Note that even if you return an error you should still close the file. It is safe to call this method multiple times.

    @@ -542,7 +544,7 @@
    -VFSFile.xDeviceCharacteristics() int
    +VFSFile.xDeviceCharacteristics() int

    Return I/O capabilities (bitwise or of appropriate values). If you do not implement the function or have an error then 0 (the SQLite default) is returned.
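A sketch, assuming your storage genuinely has this property (the constant name comes from SQLite and is exported by apsw):

    def xDeviceCharacteristics(self):
        # claim that overwriting bytes does not damage neighbouring bytes
        return apsw.SQLITE_IOCAP_POWERSAFE_OVERWRITE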

    @@ -550,7 +552,7 @@
    -VFSFile.xFileControl(op: int, ptr: int) bool
    +VFSFile.xFileControl(op: int, ptr: int) bool

Receives a file control request, typically issued by Connection.filecontrol(). See Connection.filecontrol() for an example of how to pass a
@@ -586,14 +588,14 @@

    -VFSFile.xFileSize() int
    +VFSFile.xFileSize() int

    Return the size of the file in bytes. Remember that file sizes are 64 bit quantities even on 32 bit operating systems.

    -VFSFile.xLock(level: int) None
    +VFSFile.xLock(level: int) None

Increase the lock to the level specified which is one of the SQLITE_LOCK family of constants. If you can’t increase the lock level because
@@ -602,7 +604,7 @@

    -VFSFile.xRead(amount: int, offset: int) bytes
    +VFSFile.xRead(amount: int, offset: int) bytes

Read the specified amount of data starting at offset. You should make every effort to read all the data requested, or return an error. If you have the file open for non-blocking I/O or if
@@ -622,7 +624,7 @@

    -VFSFile.xSectorSize() int
    +VFSFile.xSectorSize() int

Return the native underlying sector size. SQLite uses the value returned in determining the default database page size. If you do not implement the function or have an error then 4096 (the SQLite
@@ -631,7 +633,7 @@

    -VFSFile.xSync(flags: int) None
    +VFSFile.xSync(flags: int) None

    Ensure data is on the disk platters (ie could survive a power failure immediately after the call returns) with the sync flags detailing what needs to be synced. You can sync more than what is requested.

    @@ -639,14 +641,14 @@
    -VFSFile.xTruncate(newsize: int) None
    +VFSFile.xTruncate(newsize: int) None

    Set the file length to newsize (which may be more or less than the current length).

    -VFSFile.xUnlock(level: int) None
    +VFSFile.xUnlock(level: int) None

    Decrease the lock to the level specified which is one of the SQLITE_LOCK family of constants.

    @@ -654,7 +656,7 @@
    -VFSFile.xWrite(data: bytes, offset: int) None
    +VFSFile.xWrite(data: bytes, offset: int) None

Write the data starting at absolute offset. You must write all the data requested, or return an error. If you have the file open for non-blocking I/O or if signals happen then it is possible for the
@@ -669,7 +671,7 @@

URIFilename class

    class URIFilename
    @@ -686,13 +688,13 @@
    -URIFilename.filename() str
    +URIFilename.filename() str

    Returns the filename.

    -URIFilename.uri_boolean(name: str, default: bool) bool
    +URIFilename.uri_boolean(name: str, default: bool) bool

    Returns the boolean value for parameter name or default if not present.

    Calls: sqlite3_uri_boolean

    @@ -700,7 +702,7 @@
    -URIFilename.uri_int(name: str, default: int) int
    +URIFilename.uri_int(name: str, default: int) int

    Returns the integer value for parameter name or default if not present.

    Calls: sqlite3_uri_int64

    @@ -708,7 +710,7 @@
    -URIFilename.uri_parameter(name: str) Optional[str]
    +URIFilename.uri_parameter(name: str) Optional[str]

    Returns the value of parameter name or None.

    Calls: sqlite3_uri_parameter

@@ -54,7 +56,7 @@
Virtual Tables

Virtual Tables are a feature introduced in SQLite 3.3.7. They let a developer provide an underlying table implementation, while still presenting a normal SQL interface
@@ -77,11 +79,11 @@
level, they are just one set of methods. At the Python/APSW level, they are split over the 3 types of object. The leading x is omitted in Python. You can return SQLite error codes (eg SQLITE_READONLY) by raising the appropriate exceptions (eg ReadOnlyError). exceptionfor() is a useful helper function to do the mapping.

VTModule class

    class VTModule
    @@ -89,9 +91,9 @@

    Note

-There is no actual VTModule class - it is just shown this way
-for documentation convenience. Your module instance should implement
-all the methods documented here.
+There is no actual VTModule class - it is shown this way for
+documentation convenience and is present as a typing protocol.
+Your module instance should implement all the methods documented here.

A module instance is used to create the virtual tables. Once you have a module object, you register it with a connection by calling
@@ -103,7 +105,7 @@
con.createmodule("modulename", mymod)
# tell SQLite about the table
-con.cursor().execute("create VIRTUAL table tablename USING modulename('arg1', 2)")
+con.execute("create VIRTUAL table tablename USING modulename('arg1', 2)")

The create step is to tell SQLite about the existence of the table.
@@ -111,7 +113,7 @@
way. Note the (optional) arguments which are passed to the module.
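To make this concrete, here is a minimal sketch of a module (all names and data are illustrative, not from the docs); the matching table and cursor are sketched under the VTTable and VTCursor sections below:

    class MyModule:
        # serve a fixed two-column dataset read-only
        def Create(self, connection, modulename, databasename, tablename, *args):
            schema = "CREATE TABLE x(name TEXT, value INTEGER)"
            return schema, MyTable()

        Connect = Create  # same behaviour on later connections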

    -VTModule.Connect(connection, modulename, databasename, tablename, *args) [ sql string, table object ]
    +VTModule.Connect(connection: Connection, modulename: str, databasename: str, tablename: str, *args: Tuple[SQLiteValue, ...]) Tuple[str, VTTable]

The parameters and return are identical to Create(). This method is called when there are additional references to the table. Create() will be called the first time and
@@ -134,7 +136,7 @@

    -VTModule.Create(connection, modulename, databasename, tablename, *args) [ sql string, table object ]
    +VTModule.Create(connection: Connection, modulename: str, databasename: str, tablename: str, *args: Tuple[SQLiteValue, ...]) Tuple[str, VTTable]

    Called when a table is first created on a connection.

    Parameters
    @@ -158,15 +160,15 @@
VTTable class

    class VTTable

    Note

-There is no actual VTTable class - it is just shown this way for
-documentation convenience. Your table instance should implement
-the methods documented here.
+There is no actual VTTable class - it is shown this way for
+documentation convenience and is present as a typing protocol.
+Your table instance should implement the methods documented here.

    The VTTable object contains knowledge of the indices, makes cursors and can perform transactions.
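Continuing the illustrative sketch from the VTModule section:

    class MyTable:
        data = [("one", 1), ("two", 2)]

        def BestIndex(self, constraints, orderbys):
            return None  # no index help: SQLite will do a full scan

        def Open(self):
            return MyCursor(self)

        def Disconnect(self):
            pass

        Destroy = Disconnect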

    @@ -181,16 +183,16 @@
    -VTTable.Begin()
    +VTTable.Begin() None

    This function is used as part of transactions. You do not have to provide the method.

    -VTTable.BestIndex(constraints, orderbys)
    +VTTable.BestIndex(constraints: Sequence[Tuple[int, int], ...], orderbys: Sequence[Tuple[int, int], ...]) Any

This is a complex method. To get going initially, just return None and you will be fine. Implementing this method reduces the number of rows scanned in your table to satisfy queries, but only if you have an index or index like mechanism available.

    @@ -361,23 +363,24 @@
    -VTTable.Commit()
    +VTTable.Commit() None

    This function is used as part of transactions. You do not have to provide the method.

    -VTTable.Destroy()
    +VTTable.Destroy() None

The opposite of VTModule.Create(). This method is called when the table is no longer used. Note that you must always release resources even if you intend to return an error, as it will not be called again on error. SQLite may also leak memory if you return an error.

    -VTTable.Disconnect()
    +VTTable.Disconnect() None

    The opposite of VTModule.Connect(). This method is called when a reference to a virtual table is no longer used, but VTTable.Destroy() will be called when the table is no longer used.

    @@ -385,7 +388,7 @@
    -VTTable.FindFunction(name, nargs)
    +VTTable.FindFunction(name: str, nargs: int)

    Called to find if the virtual table has its own implementation of a particular scalar function. You should return the function if you have it, else return None. You do not have to provide this method.

    @@ -412,13 +415,13 @@
    -VTTable.Open()
    +VTTable.Open() VTCursor

    Returns a cursor object.

    -VTTable.Rename(newname)
    +VTTable.Rename(newname: str) None

Notification that the table will be given a new name. If you return without raising an exception, then SQLite renames the table (you don’t have to do anything). If you raise an exception then the
@@ -427,21 +430,21 @@

    -VTTable.Rollback()
    +VTTable.Rollback() None

    This function is used as part of transactions. You do not have to provide the method.

    -VTTable.Sync()
    +VTTable.Sync() None

    This function is used as part of transactions. You do not have to provide the method.

    -VTTable.UpdateChangeRow(row, newrowid, fields)
    +VTTable.UpdateChangeRow(row: int, newrowid: int, fields: Tuple[SQLiteValue, ...])

    Change an existing row. You may also need to change the rowid - for example if the query was UPDATE table SET rowid=rowid+100 WHERE ...

    @@ -457,7 +460,7 @@
    -VTTable.UpdateDeleteRow(rowid)
    +VTTable.UpdateDeleteRow(rowid: int)

    Delete the row with the specified rowid.

    Parameters
    @@ -468,18 +471,18 @@
    -VTTable.UpdateInsertRow(rowid, fields) newrowid
    +VTTable.UpdateInsertRow(rowid: Optional[int], fields: Tuple[SQLiteValue, ...]) Optional[int]

    Insert a row with the specified rowid.

    Parameters
• rowid – None if you should choose the rowid yourself, else a 64 bit integer

• fields – A tuple of values the same length and order as columns in your table

Returns

If rowid was None then return the id you assigned to the row. If rowid was not None then the return value is ignored.
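A sketch of an implementation, assuming rows are kept in an illustrative dict keyed by rowid:

    def UpdateInsertRow(self, rowid, fields):
        if rowid is None:
            # we get to choose: take one past the current largest rowid
            rowid = max(self.rows, default=0) + 1
            self.rows[rowid] = fields
            return rowid            # tell SQLite the id we assigned
        self.rows[rowid] = fields   # SQLite chose the rowid
        return None                 # return value is ignored here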

    @@ -487,30 +490,30 @@
VTCursor class

    class VTCursor
Note

-There is no actual VTCursor class - it is just shown this
-way for documentation convenience. Your cursor instance should
-implement all the methods documented here.
+There is no actual VTCursor class - it is shown this way for
+documentation convenience and is present as a typing protocol.
+Your cursor instance should implement all the methods documented
+here.

The VTCursor object is used for iterating over a table. There may be many cursors simultaneously so each one needs to keep track of where it is.

[diagram: Virtual table structure]
    -VTCursor.Close()
    +VTCursor.Close() None

This is the destructor for the cursor. Note that you must clean up. The method will not be called again if you raise an exception.

    @@ -518,7 +521,7 @@
    -VTCursor.Column(number)
    +VTCursor.Column(number: int) SQLiteValue

    Requests the value of the specified column number of the current row. If number is -1 then return the rowid.

    @@ -531,7 +534,7 @@
    -VTCursor.Eof() bool
    +VTCursor.Eof() bool

    Called to ask if we are at the end of the table. It is called after each call to Filter and Next.

    Returns
    @@ -548,7 +551,7 @@
    -VTCursor.Filter(indexnum, indexname, constraintargs)
    +VTCursor.Filter(indexnum: int, indexname: str, constraintargs: Optional[Tuple]) None

This method is always called first to initialize an iteration to the first row of the table. The arguments come from the BestIndex() method in the table
@@ -559,7 +562,7 @@

    -VTCursor.Next()
    +VTCursor.Next() None

Move the cursor to the next row. Do not raise an exception if there is no next row. Instead arrange for Eof() to return True when it is subsequently called.

    @@ -571,16 +574,13 @@
    -VTCursor.Rowid() 64 bit integer
    +VTCursor.Rowid() int

    Return the current rowid.
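Completing the illustrative sketch started in the VTModule section, a cursor over MyTable.data (ignoring constraints for simplicity):

    class MyCursor:
        def __init__(self, table):
            self.table = table
            self.pos = 0

        def Filter(self, indexnum, indexname, constraintargs):
            self.pos = 0  # (re)start the iteration at the first row

        def Eof(self):
            return self.pos >= len(self.table.data)

        def Rowid(self):
            return self.pos

        def Column(self, number):
            if number == -1:
                return self.Rowid()
            return self.table.data[self.pos][number]

        def Next(self):
            self.pos += 1

        def Close(self):
            pass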

Troubleshooting virtual tables

-Virtual Tables are a relatively recent addition to SQLite and haven't
-been widely used yet. They do work well if all your routines work
-perfectly.

A big help is using the local variables recipe as described in augmented stack traces which will give you more details in errors, and shows an example with the complex
@@ -606,23 +606,65 @@
