diff -Nru fdb-1.6.1+dfsg1/debian/changelog fdb-2.0.0/debian/changelog --- fdb-1.6.1+dfsg1/debian/changelog 2016-12-06 06:56:00.000000000 +0000 +++ fdb-2.0.0/debian/changelog 2019-02-07 10:43:34.000000000 +0000 @@ -1,3 +1,9 @@ +fdb (2.0.0-1) unstable; urgency=low + + * New upstream version + + -- Russell Stuart Thu, 7 Feb 2019 20:43:34 +1000 + fdb (1.6.1+dfsg1-1) unstable; urgency=low * New upstream version diff -Nru fdb-1.6.1+dfsg1/debian/control fdb-2.0.0/debian/control --- fdb-1.6.1+dfsg1/debian/control 2016-07-24 10:00:39.000000000 +0000 +++ fdb-2.0.0/debian/control 2019-02-07 10:43:34.000000000 +0000 @@ -4,18 +4,20 @@ Priority: optional Build-Depends: debhelper (>= 9) Build-Depends-Indep: - python-all (>= 2.6), python3-all, - python-sphinx, libjs-sphinxdoc, + python-all (>= 2.7), python3-all, + python-sphinx, python-sphinx-bootstrap-theme, python-setuptools, python3-setuptools, + python-future, + libjs-sphinxdoc, dh-python Standards-Version: 3.9.8 -X-Python-Version: >= 2.6 -X-Python3-Version: >= 3.0 +X-Python-Version: >= 2.7 +X-Python3-Version: >= 3.4 Homepage: https://pypi.python.org/pypi/fdb/ Package: python-fdb Architecture: all -Depends: ${python:Depends}, ${misc:Depends}, libfbclient2 +Depends: ${python:Depends}, ${misc:Depends}, libfbclient2, python-future Suggests: python-fdb-doc Description: Python2 DB-API driver for Firebird FDB is a Python library package that implements Python Database API diff -Nru fdb-1.6.1+dfsg1/debian/copyright fdb-2.0.0/debian/copyright --- fdb-1.6.1+dfsg1/debian/copyright 2014-10-21 22:52:17.000000000 +0000 +++ fdb-2.0.0/debian/copyright 2019-02-07 10:43:34.000000000 +0000 @@ -2,7 +2,6 @@ Upstream-Name: fdb Upstream-Contact: Pavel Cisar Source: http://pypi.python.org/packages/source/f/fdb -Files-Excluded: docs/* Files: * Copyright: 2008-2014 Pavel Cisar and others diff -Nru fdb-1.6.1+dfsg1/debian/patches/backport-to-future-0.15.patch fdb-2.0.0/debian/patches/backport-to-future-0.15.patch --- fdb-1.6.1+dfsg1/debian/patches/backport-to-future-0.15.patch 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/debian/patches/backport-to-future-0.15.patch 2019-02-07 10:43:34.000000000 +0000 @@ -0,0 +1,15 @@ +Author: Russell Stuart +Description: backport to future-0.15 + Stretch only has python-future 0.15, but that is good enough. 
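The patch body below only relaxes upstream's pin on python-future from 0.16 to 0.15. A minimal runtime sketch of what the relaxed pin guarantees (illustrative only, not part of the packaging):

import pkg_resources
# Raises DistributionNotFound/VersionConflict if the installed
# python-future no longer satisfies the relaxed requirement.
pkg_resources.require('future>=0.15.0')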
+ +--- a/setup.py ++++ b/setup.py +@@ -52,7 +52,7 @@ + ], + keywords='Firebird', # Optional + packages=find_packages(), # Required +- install_requires=['future>=0.16.0'], # Optional ++ install_requires=['future>=0.15.0'], # Optional + python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4', + #test_suite='nose.collector', + project_urls={ diff -Nru fdb-1.6.1+dfsg1/debian/patches/disable-tests.patch fdb-2.0.0/debian/patches/disable-tests.patch --- fdb-1.6.1+dfsg1/debian/patches/disable-tests.patch 2016-07-24 10:53:45.000000000 +0000 +++ fdb-2.0.0/debian/patches/disable-tests.patch 2019-02-07 10:40:57.000000000 +0000 @@ -5,12 +5,12 @@ --- a/setup.py +++ b/setup.py -@@ -27,7 +27,7 @@ - install_requires=[], - setup_requires=[], - packages=find_packages(exclude=['ez_setup']), +@@ -54,7 +54,7 @@ + packages=find_packages(), # Required + install_requires=['future>=0.16.0'], # Optional + python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4', - test_suite='nose.collector', + #test_suite='nose.collector', - #include_package_data=True, - package_data={'': ['*.txt'], - 'test':'fbtest.fdb'}, + project_urls={ + 'Documentation': 'http://fdb2.readthedocs.io/en/latest/', + 'Bug Reports': 'http://tracker.firebirdsql.org/browse/PYFB', diff -Nru fdb-1.6.1+dfsg1/debian/patches/series fdb-2.0.0/debian/patches/series --- fdb-1.6.1+dfsg1/debian/patches/series 2014-07-29 05:59:54.000000000 +0000 +++ fdb-2.0.0/debian/patches/series 2019-02-07 10:43:34.000000000 +0000 @@ -1,2 +1,3 @@ remove-googleapis.patch disable-tests.patch +backport-to-future-0.15.patch diff -Nru fdb-1.6.1+dfsg1/debian/rules fdb-2.0.0/debian/rules --- fdb-1.6.1+dfsg1/debian/rules 2016-07-24 09:41:38.000000000 +0000 +++ fdb-2.0.0/debian/rules 2019-02-07 10:43:34.000000000 +0000 @@ -13,15 +13,18 @@ get-orig-source: uscan --force-download -override_dh_auto_build: +override_dh_auto_build: $(PYBUILD_NAME).egg-info.orig dh_auto_build rm -rf docs $(MAKE) --directory sphinx html rm -rf html mv docs html +override_dh_auto_test: + # Test doesn't work without root. + override_dh_installdocs: - dh_installdocs --no-package=$(DOC_PACKAGE) README + dh_installdocs --no-package=$(DOC_PACKAGE) README.rst dh_installdocs --package=$(DOC_PACKAGE) html dh_sphinxdoc --package=$(DOC_PACKAGE) $(DOC_DIR) debian/ras-sphinxdoc.sh $(DOC_PACKAGE)/$(DOC_DIR) @@ -35,7 +38,7 @@ dh_auto_clean $(MAKE) --directory sphinx clean [ ! -d $(PYBUILD_NAME).egg-info.orig ] || { rm -rf $(PYBUILD_NAME).egg-info; mv $(PYBUILD_NAME).egg-info.orig $(PYBUILD_NAME).egg-info; } - rm -rf html debian/missing-sources + rm -rf html debian/missing-sources .eggs override_dh_auto_install: $(PYBUILD_NAME).egg-info.orig dh_auto_install diff -Nru fdb-1.6.1+dfsg1/fdb/blr.py fdb-2.0.0/fdb/blr.py --- fdb-1.6.1+dfsg1/fdb/blr.py 2014-11-13 14:55:46.000000000 +0000 +++ fdb-2.0.0/fdb/blr.py 2018-04-26 14:39:03.000000000 +0000 @@ -2,7 +2,7 @@ # # PROGRAM/MODULE: fdb # FILE: blr.py -# DESCRIPTION: BLR-related definitions +# DESCRIPTION: Python driver for Firebird - BLR-related definitions # CREATED: 12.6.2013 # # Software distributed under the License is distributed AS IS, @@ -12,7 +12,7 @@ # # The Original Code was created by Pavel Cisar # -# Copyright (c) 2011 Pavel Cisar +# Copyright (c) Pavel Cisar # and all contributors signed below. # # All Rights Reserved. @@ -21,6 +21,42 @@ # # See LICENSE.TXT for details. 
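The constant block that follows mirrors Firebird's BLR type codes. A minimal sketch of turning a numeric code back into a name for debugging, assuming the module is importable as fdb.blr; the map is lossy because several codes collide (e.g. the historical alias blr_date equals blr_timestamp):

import fdb.blr as blr

# Lossy reverse map: numeric BLR code -> one of its constant names.
BLR_NAMES = {value: name for name, value in sorted(vars(blr).items())
             if name.startswith('blr_') and isinstance(value, int)}

print(BLR_NAMES.get(35, 'unknown'))  # 'blr_timestamp' (alias blr_date)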
+# BLR data types are defined in fdb.ibase + +#blr_text = 14 +#blr_text2 = 15 +#blr_short = 7 +#blr_long = 8 +#blr_quad = 9 +#blr_float = 10 +#blr_double = 27 +#blr_d_float = 11 +#blr_timestamp = 35 +#blr_varying = 37 +#blr_varying2 = 38 +#blr_blob = 261 +#blr_cstring = 40 +#blr_cstring2 = 41 +#blr_blob_id = 45 +#blr_sql_date = 12 +#blr_sql_time = 13 +#blr_int64 = 16 +#blr_blob2 = 17 +#blr_domain_name = 18 +#blr_domain_name2 = 19 +#blr_not_nullable = 20 +#blr_column_name = 21 +#blr_column_name2 = 22 +#blr_bool = 23 # Firebird 3.0 + +# first sub parameter for blr_domain_name[2] +blr_domain_type_of = 0 +blr_domain_full = 1 + +# Historical alias for pre V6 applications +blr_date = 35 # blr_timestamp + +# Other BLR codes blr_inner = 0 blr_left = 1 @@ -34,6 +70,9 @@ blr_default_code = 4 blr_raise = 5 blr_exception_msg = 6 +# Firebird 3.0 +blr_exception_params = 7 +blr_sql_state = 8 blr_version4 = 4 blr_version5 = 5 @@ -70,6 +109,7 @@ blr_maximum = 29 blr_minimum = 30 blr_total = 31 +# unused codes 32..33 blr_add = 34 blr_subtract = 35 blr_multiply = 36 @@ -101,6 +141,7 @@ blr_missing = 61 blr_unique = 62 blr_like = 63 +# unused codes 64..66 blr_rse = 67 blr_first = 68 blr_project = 69 @@ -115,20 +156,23 @@ blr_group_by = 78 blr_aggregate = 79 blr_join_type = 80 +# unused codes 81..82 blr_agg_count = 83 blr_agg_max = 84 blr_agg_min = 85 blr_agg_total = 86 blr_agg_average = 87 blr_parameter3 = 88 -blr_run_max = 89 -blr_run_min = 90 -blr_run_total = 91 -blr_run_average = 92 +# Unsupported +#blr_run_max = 89 +#blr_run_min = 90 +#blr_run_total = 91 +#blr_run_average = 92 blr_agg_count2 = 93 blr_agg_count_distinct = 94 blr_agg_total_distinct = 95 blr_agg_average_distinct = 96 +# unused codes 97..99 blr_function = 100 blr_gen_id = 101 blr_prot_mask = 102 @@ -138,17 +182,14 @@ blr_matching2 = 106 blr_index = 107 blr_ansi_like = 108 -blr_seek = 112 - -blr_continue = 0 -blr_forward = 1 -blr_backward = 2 -blr_bof_forward = 3 -blr_eof_backward = 4 +blr_scrollable = 109 # Firebird 3.0 +#blr_seek = 112 # Defined in FB < 3.0 +# unused codes 110..117 blr_run_count = 118 blr_rs_stream = 119 blr_exec_proc = 120 +# unused codes 121..123 blr_procedure = 124 blr_pid = 125 blr_exec_pid = 126 @@ -157,8 +198,13 @@ blr_block = 129 blr_error_handler = 130 blr_cast = 131 +# Firebird 3.0 +blr_pid2 = 132 +blr_procedure2 = 133 +# blr_start_savepoint = 134 blr_end_savepoint = 135 +#unused codes 136..138 # Access plan items blr_plan = 139 @@ -171,14 +217,24 @@ blr_relation2 = 146 blr_rid2 = 147 +# unused codes 148..149 blr_set_generator = 150 blr_ansi_any = 151 blr_exists = 152 +# unused codes 153 blr_record_version = 154 blr_stall = 155 +# unused codes 156..157 blr_ansi_all = 158 blr_extract = 159 +# these indicate directions for blr_seek and blr_find +blr_continue = 0 +blr_forward = 1 +blr_backward = 2 +blr_bof_forward = 3 +blr_eof_backward = 4 + # sub parameters for blr_extract blr_extract_year = 0 blr_extract_month = 1 @@ -207,6 +263,7 @@ blr_agg_list = 170 blr_agg_list_distinct = 171 blr_modify2 = 172 +# unused codes 173 # FB 1.0 specific BLR blr_current_role = 174 @@ -251,6 +308,15 @@ blr_cursor_open = 0 blr_cursor_close = 1 blr_cursor_fetch = 2 +blr_cursor_fetch_scroll = 3 # Firebird 3.0 + +# Scroll options (FB 3.0) +blr_croll_forward = 0 +blr_croll_backward = 1 +blr_croll_bof = 2 +blr_croll_eof = 3 +blr_croll_absolute = 4 +blr_croll_relative = 5 # FB 2.1 specific BLR @@ -283,3 +349,24 @@ blr_stmt_expr = 190 blr_derived_expr = 191 +# FB 3.0 specific BLR + +blr_procedure3 = 192 +blr_exec_proc2 = 193 +blr_function2 = 194 
+blr_window = 195 +blr_partition_by = 196 +blr_continue_loop = 197 +blr_procedure4 = 198 +blr_agg_function = 199 +blr_substring_similar = 200 +blr_bool_as_value = 201 +blr_coalesce = 202 +blr_decode = 203 +blr_exec_subproc = 204 +blr_subproc_decl = 205 +blr_subproc = 206 +blr_subfunc_decl = 207 +blr_subfunc = 208 +blr_record_version2 = 209 +blr_gen_id2 = 210 # NEXT VALUE FOR generator diff -Nru fdb-1.6.1+dfsg1/fdb/fbcore.py fdb-2.0.0/fdb/fbcore.py --- fdb-1.6.1+dfsg1/fdb/fbcore.py 2016-11-28 13:21:50.000000000 +0000 +++ fdb-2.0.0/fdb/fbcore.py 2018-04-27 11:40:21.000000000 +0000 @@ -2,7 +2,7 @@ # # PROGRAM/MODULE: fdb # FILE: fbcore.py -# DESCRIPTION: Python driver for Firebird +# DESCRIPTION: Python driver for Firebird - Core # CREATED: 8.10.2011 # # Software distributed under the License is distributed AS IS, @@ -12,7 +12,7 @@ # # The Original Code was created by Pavel Cisar # -# Copyright (c) 2011 Pavel Cisar +# Copyright (c) Pavel Cisar # and all contributors signed below. # # All Rights Reserved. @@ -30,6 +30,7 @@ import decimal import weakref import threading +from builtins import dict from . import ibase from . import schema @@ -43,129 +44,164 @@ # Python 3 from itertools import zip_longest as izip_longest from fdb.ibase import (frb_info_att_charset, isc_dpb_activate_shadow, - isc_dpb_address_path, isc_dpb_allocation, isc_dpb_begin_log, - isc_dpb_buffer_length, isc_dpb_cache_manager, isc_dpb_cdd_pathname, - isc_dpb_connect_timeout, isc_dpb_damaged, isc_dpb_dbkey_scope, - isc_dpb_debug, isc_dpb_delete_shadow, isc_dpb_disable_journal, - isc_dpb_disable_wal, isc_dpb_drop_walfile, - isc_dpb_dummy_packet_interval, isc_dpb_enable_journal, - isc_dpb_encrypt_key, isc_dpb_force_write, isc_dpb_garbage_collect, - isc_dpb_gbak_attach, isc_dpb_gfix_attach, isc_dpb_gsec_attach, - isc_dpb_gstat_attach, isc_dpb_interp, isc_dpb_journal, - isc_dpb_lc_ctype, isc_dpb_lc_messages, isc_dpb_license, - isc_dpb_no_garbage_collect, isc_dpb_no_reserve, - isc_dpb_num_buffers, isc_dpb_number_of_users, isc_dpb_old_dump_id, - isc_dpb_old_file, isc_dpb_old_file_size, isc_dpb_old_num_files, - isc_dpb_old_start_file, isc_dpb_old_start_page, - isc_dpb_old_start_seqno, isc_dpb_online, isc_dpb_online_dump, - isc_dpb_overwrite, isc_dpb_page_size, isc_dpb_password, - isc_dpb_password_enc, isc_dpb_quit_log, isc_dpb_reserved, - isc_dpb_sec_attach, isc_dpb_set_db_charset, - isc_dpb_set_db_readonly, isc_dpb_set_db_sql_dialect, - isc_dpb_set_page_buffers, isc_dpb_shutdown, isc_dpb_shutdown_delay, - isc_dpb_sql_dialect, isc_dpb_sql_role_name, isc_dpb_sweep, - isc_dpb_sweep_interval, isc_dpb_sys_user_name, - isc_dpb_sys_user_name_enc, isc_dpb_trace, isc_dpb_user_name, - isc_dpb_verify, isc_dpb_version1, isc_dpb_wal_backup_dir, - isc_dpb_wal_bufsize, isc_dpb_wal_chkptlen, - isc_dpb_wal_grp_cmt_wait, isc_dpb_wal_numbufs, - isc_dpb_working_directory, isc_dpb_no_db_triggers, isc_dpb_nolinger, - isc_info_active_tran_count, isc_info_end, isc_info_truncated, - isc_info_sql_stmt_type, isc_info_sql_get_plan, isc_info_sql_records, - isc_info_req_select_count, isc_info_req_insert_count, - isc_info_req_update_count, isc_info_req_delete_count, - isc_info_blob_total_length, isc_info_blob_max_segment, - fb_info_page_contents, - isc_info_active_transactions, isc_info_allocation, - isc_info_attachment_id, isc_info_backout_count, - isc_info_base_level, isc_info_bpage_errors, isc_info_creation_date, - isc_info_cur_log_part_offset, isc_info_cur_logfile_name, - isc_info_current_memory, - isc_info_db_class, isc_info_db_id, isc_info_db_provider, - 
isc_info_db_read_only, isc_info_db_size_in_pages, - isc_info_db_sql_dialect, isc_info_delete_count, - isc_info_dpage_errors, isc_info_expunge_count, isc_info_fetches, - isc_info_firebird_version, isc_info_forced_writes, - isc_info_implementation, isc_info_insert_count, - isc_info_ipage_errors, isc_info_isc_version, isc_info_license, - isc_info_limbo, isc_info_logfile, isc_info_marks, - isc_info_max_memory, isc_info_next_transaction, - isc_info_no_reserve, isc_info_num_buffers, - isc_info_num_wal_buffers, isc_info_ods_minor_version, - isc_info_ods_version, isc_info_oldest_active, - isc_info_oldest_snapshot, isc_info_oldest_transaction, - isc_info_page_errors, isc_info_page_size, isc_info_ppage_errors, - isc_info_purge_count, isc_info_read_idx_count, - isc_info_read_seq_count, isc_info_reads, isc_info_record_errors, - isc_info_set_page_buffers, isc_info_sql_stmt_commit, - isc_info_sql_stmt_ddl, isc_info_sql_stmt_delete, - isc_info_sql_stmt_exec_procedure, isc_info_sql_stmt_get_segment, - isc_info_sql_stmt_insert, isc_info_sql_stmt_put_segment, - isc_info_sql_stmt_rollback, isc_info_sql_stmt_savepoint, - isc_info_sql_stmt_select, isc_info_sql_stmt_select_for_upd, - isc_info_sql_stmt_set_generator, isc_info_sql_stmt_start_trans, - isc_info_sql_stmt_update, isc_info_sweep_interval, - isc_info_tpage_errors, isc_info_tra_access, - isc_info_tra_concurrency, isc_info_tra_consistency, - isc_info_tra_id, isc_info_tra_isolation, isc_info_tra_lock_timeout, - isc_info_tra_no_rec_version, isc_info_tra_oldest_active, - isc_info_tra_oldest_interesting, isc_info_tra_oldest_snapshot, - isc_info_tra_read_committed, isc_info_tra_readonly, - isc_info_tra_readwrite, isc_info_tra_rec_version, fb_info_tra_dbpath, - isc_info_update_count, isc_info_user_names, isc_info_version, - isc_info_wal_avg_grpc_size, isc_info_wal_avg_io_size, - isc_info_wal_buffer_size, isc_info_wal_ckpt_length, - isc_info_wal_cur_ckpt_interval, isc_info_wal_grpc_wait_usecs, - isc_info_wal_num_commits, isc_info_wal_num_io, - isc_info_wal_prv_ckpt_fname, isc_info_wal_prv_ckpt_poffset, - isc_info_wal_recv_ckpt_fname, isc_info_wal_recv_ckpt_poffset, - isc_info_window_turns, isc_info_writes, isc_tpb_autocommit, - - isc_tpb_commit_time, isc_tpb_concurrency, isc_tpb_consistency, - isc_tpb_exclusive, isc_tpb_ignore_limbo, isc_tpb_lock_read, - isc_tpb_lock_timeout, isc_tpb_lock_write, isc_tpb_no_auto_undo, - isc_tpb_no_rec_version, isc_tpb_nowait, isc_tpb_protected, - isc_tpb_read, isc_tpb_read_committed, isc_tpb_rec_version, - isc_tpb_restart_requests, isc_tpb_shared, isc_tpb_verb_time, - isc_tpb_version3, isc_tpb_wait, isc_tpb_write, - - b, s, ord2, int2byte, mychr, mybytes, myunicode, mylong, StringType, - IntType, LongType, FloatType, ListType, UnicodeType, TupleType, xrange, - charset_map, - - #isc_sqlcode, isc_sql_interprete, fb_interpret, isc_dsql_execute_immediate, - XSQLDA_PTR, ISC_SHORT, ISC_LONG, ISC_SCHAR, ISC_UCHAR, ISC_QUAD, - SHRT_MIN, SHRT_MAX, USHRT_MAX, INT_MIN, INT_MAX, LONG_MIN, LONG_MAX, - - - SQL_TEXT, SQL_VARYING, SQL_SHORT, SQL_LONG, SQL_FLOAT, SQL_DOUBLE, - SQL_D_FLOAT, SQL_TIMESTAMP, SQL_BLOB, SQL_ARRAY, SQL_QUAD, SQL_TYPE_TIME, - SQL_TYPE_DATE, SQL_INT64, SQL_BOOLEAN, SUBTYPE_NUMERIC, SUBTYPE_DECIMAL, - MAX_BLOB_SEGMENT_SIZE, ISC_INT64, - - XSQLVAR, ISC_TEB, RESULT_VECTOR, ISC_STATUS, ISC_STATUS_ARRAY, ISC_STATUS_PTR, - ISC_EVENT_CALLBACK, ISC_ARRAY_DESC, - - blr_varying, blr_varying2, blr_text, blr_text2, blr_short, blr_long, - blr_int64, blr_float, blr_d_float, blr_double, blr_timestamp, blr_sql_date, - blr_sql_time, - - 
SQLDA_version1, isc_segment, - - isc_db_handle, isc_tr_handle, isc_stmt_handle, isc_blob_handle, - - fbclient_API, - - ) + isc_dpb_address_path, isc_dpb_allocation, isc_dpb_begin_log, + isc_dpb_buffer_length, isc_dpb_cache_manager, isc_dpb_cdd_pathname, + isc_dpb_connect_timeout, isc_dpb_damaged, isc_dpb_dbkey_scope, + isc_dpb_debug, isc_dpb_delete_shadow, + isc_dpb_dummy_packet_interval, + isc_dpb_encrypt_key, isc_dpb_force_write, isc_dpb_garbage_collect, + isc_dpb_gbak_attach, isc_dpb_gfix_attach, isc_dpb_gsec_attach, + isc_dpb_gstat_attach, isc_dpb_interp, + isc_dpb_lc_ctype, isc_dpb_lc_messages, + isc_dpb_no_garbage_collect, isc_dpb_no_reserve, + isc_dpb_num_buffers, isc_dpb_number_of_users, isc_dpb_old_dump_id, + isc_dpb_old_file, isc_dpb_old_file_size, isc_dpb_old_num_files, + isc_dpb_old_start_file, isc_dpb_old_start_page, + isc_dpb_old_start_seqno, isc_dpb_online, isc_dpb_online_dump, + isc_dpb_overwrite, isc_dpb_page_size, isc_dpb_password, + isc_dpb_password_enc, isc_dpb_reserved, + isc_dpb_sec_attach, isc_dpb_set_db_charset, + isc_dpb_set_db_readonly, isc_dpb_set_db_sql_dialect, + isc_dpb_set_page_buffers, isc_dpb_shutdown, isc_dpb_shutdown_delay, + isc_dpb_sql_dialect, isc_dpb_sql_role_name, isc_dpb_sweep, + isc_dpb_sweep_interval, isc_dpb_sys_user_name, + isc_dpb_sys_user_name_enc, isc_dpb_trace, isc_dpb_user_name, + isc_dpb_verify, isc_dpb_version1, + isc_dpb_working_directory, isc_dpb_no_db_triggers, isc_dpb_nolinger, + isc_info_active_tran_count, isc_info_end, isc_info_truncated, + isc_info_sql_stmt_type, isc_info_sql_get_plan, isc_info_sql_records, + isc_info_req_select_count, isc_info_req_insert_count, + isc_info_req_update_count, isc_info_req_delete_count, + isc_info_blob_total_length, isc_info_blob_max_segment, + isc_info_blob_type, isc_info_blob_num_segments, + fb_info_page_contents, + isc_info_active_transactions, isc_info_allocation, + isc_info_attachment_id, isc_info_backout_count, + isc_info_base_level, isc_info_bpage_errors, isc_info_creation_date, + isc_info_current_memory, + isc_info_db_class, isc_info_db_id, isc_info_db_provider, + isc_info_db_read_only, isc_info_db_size_in_pages, + isc_info_db_sql_dialect, isc_info_delete_count, + isc_info_dpage_errors, isc_info_expunge_count, isc_info_fetches, + isc_info_firebird_version, isc_info_forced_writes, + isc_info_implementation, isc_info_insert_count, + isc_info_ipage_errors, isc_info_isc_version, + isc_info_limbo, isc_info_marks, isc_info_max_memory, + isc_info_next_transaction, isc_info_no_reserve, isc_info_num_buffers, + isc_info_ods_minor_version, isc_info_ods_version, isc_info_oldest_active, + isc_info_oldest_snapshot, isc_info_oldest_transaction, + isc_info_page_errors, isc_info_page_size, isc_info_ppage_errors, + isc_info_purge_count, isc_info_read_idx_count, + isc_info_read_seq_count, isc_info_reads, isc_info_record_errors, + isc_info_set_page_buffers, isc_info_sql_stmt_commit, + isc_info_sql_stmt_ddl, isc_info_sql_stmt_delete, + isc_info_sql_stmt_exec_procedure, isc_info_sql_stmt_get_segment, + isc_info_sql_stmt_insert, isc_info_sql_stmt_put_segment, + isc_info_sql_stmt_rollback, isc_info_sql_stmt_savepoint, + isc_info_sql_stmt_select, isc_info_sql_stmt_select_for_upd, + isc_info_sql_stmt_set_generator, isc_info_sql_stmt_start_trans, + isc_info_sql_stmt_update, isc_info_sweep_interval, + isc_info_tpage_errors, isc_info_tra_access, + isc_info_tra_concurrency, isc_info_tra_consistency, + isc_info_tra_id, isc_info_tra_isolation, isc_info_tra_lock_timeout, + isc_info_tra_no_rec_version, 
isc_info_tra_oldest_active, + isc_info_tra_oldest_interesting, isc_info_tra_oldest_snapshot, + isc_info_tra_read_committed, isc_info_tra_readonly, + isc_info_tra_readwrite, isc_info_tra_rec_version, fb_info_tra_dbpath, + isc_info_update_count, isc_info_user_names, isc_info_version, + isc_info_writes, isc_tpb_autocommit, + # FB 3 + isc_dpb_version2, + fb_info_implementation, fb_info_page_warns, fb_info_record_warns, + fb_info_bpage_warns, fb_info_dpage_warns, fb_info_ipage_warns, + fb_info_ppage_warns, fb_info_tpage_warns, fb_info_pip_errors, fb_info_pip_warns, + # + isc_tpb_commit_time, isc_tpb_concurrency, isc_tpb_consistency, + isc_tpb_exclusive, isc_tpb_ignore_limbo, isc_tpb_lock_read, + isc_tpb_lock_timeout, isc_tpb_lock_write, isc_tpb_no_auto_undo, + isc_tpb_no_rec_version, isc_tpb_nowait, isc_tpb_protected, + isc_tpb_read, isc_tpb_read_committed, isc_tpb_rec_version, + isc_tpb_restart_requests, isc_tpb_shared, isc_tpb_verb_time, + isc_tpb_version3, isc_tpb_wait, isc_tpb_write, + # + b, s, ord2, int2byte, mychr, mybytes, myunicode, mylong, StringType, + IntType, LongType, FloatType, ListType, UnicodeType, TupleType, xrange, + charset_map, + #isc_sqlcode, isc_sql_interprete, fb_interpret, isc_dsql_execute_immediate, + XSQLDA_PTR, ISC_SHORT, ISC_LONG, ISC_SCHAR, ISC_UCHAR, ISC_QUAD, + ISC_DATE, ISC_TIME, + SHRT_MIN, SHRT_MAX, USHRT_MAX, INT_MIN, INT_MAX, LONG_MIN, LONG_MAX, + # + SQL_TEXT, SQL_VARYING, SQL_SHORT, SQL_LONG, SQL_FLOAT, SQL_DOUBLE, + SQL_D_FLOAT, SQL_TIMESTAMP, SQL_BLOB, SQL_ARRAY, SQL_QUAD, SQL_TYPE_TIME, + SQL_TYPE_DATE, SQL_INT64, SQL_BOOLEAN, SUBTYPE_NUMERIC, SUBTYPE_DECIMAL, + MAX_BLOB_SEGMENT_SIZE, ISC_INT64, + # + XSQLVAR, ISC_TEB, RESULT_VECTOR, ISC_STATUS, ISC_STATUS_ARRAY, + ISC_STATUS_PTR, ISC_EVENT_CALLBACK, ISC_ARRAY_DESC, + # + blr_varying, blr_varying2, blr_text, blr_text2, blr_short, blr_long, + blr_int64, blr_float, blr_d_float, blr_double, blr_timestamp, blr_sql_date, + blr_sql_time, blr_cstring, blr_quad, blr_blob, blr_bool, + # + SQLDA_version1, isc_segment, + isc_db_handle, isc_tr_handle, isc_stmt_handle, isc_blob_handle, + sys_encoding) PYTHON_MAJOR_VER = sys.version_info[0] -__version__ = '1.6.1' +#: Current driver version +__version__ = '2.0.0' apilevel = '2.0' threadsafety = 1 paramstyle = 'qmark' +HOOK_API_LOADED = 1 +HOOK_DATABASE_ATTACHED = 2 +HOOK_DATABASE_ATTACH_REQUEST = 3 +HOOK_DATABASE_DETACH_REQUEST = 4 +HOOK_DATABASE_CLOSED = 5 +HOOK_SERVICE_ATTACHED = 6 + +hooks = {} + +def add_hook(hook_type, func): + """Instals hook function for specified hook_type. + + :param hook_type: One from HOOK_* constants + :param func: Hook routine to be installed + + .. important:: + + Routine must have a signature required for given hook type. + However it's not checked when hook is installed, and any + issue will lead to run-time error when hook routine is executed. + """ + hooks.setdefault(hook_type, list()).append(func) + +def remove_hook(hook_type, func): + """Uninstalls previously installed hook function for + specified hook_type. + + :param hook_type: One from HOOK_* constants + :param func: Hook routine to be uninstalled + + If hook routine wasn't previously installed, it does nothing. + """ + try: + hooks.get(hook_type, list()).remove(func) + except: + pass + +def get_hooks(hook_type): + """Returns list of installed hook routines for specified hook_type. + + :param hook_type: One from HOOK_* constants + :returns: List of installed hook routines. 
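A usage sketch for the hook registry above, assuming fbcore's public names are re-exported at package level (fdb.add_hook etc.); the callback signature follows the HOOK_API_LOADED contract documented in load_api below:

import fdb

def on_api_loaded(api):
    # Required signature for HOOK_API_LOADED: a single `api` argument.
    print('Firebird client bindings initialized')

fdb.add_hook(fdb.HOOK_API_LOADED, on_api_loaded)
assert on_api_loaded in fdb.get_hooks(fdb.HOOK_API_LOADED)
fdb.remove_hook(fdb.HOOK_API_LOADED, on_api_loaded)  # no-op if not installed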
+ """ + return hooks.get(hook_type, list()) + def load_api(fb_library_name=None): """Initializes bindings to Firebird Client Library unless they are already initialized. Called automatically by :func:`fdb.connect` and :func:`fdb.create_database`. @@ -174,18 +210,20 @@ When it's not specified, FDB does its best to locate appropriate client library. :returns: :class:`fdb.ibase.fbclient_API` instance. + + Hooks: + + Event HOOK_API_LOADED: Executed after api is initialized. Hook routine must + have signature: hook_func(api). Any value returned by hook is ignored. """ - if not hasattr(sys.modules[__name__],'api'): - setattr(sys.modules[__name__],'api',fbclient_API(fb_library_name)) - return getattr(sys.modules[__name__],'api') + if not hasattr(sys.modules[__name__], 'api'): + setattr(sys.modules[__name__], 'api', ibase.fbclient_API(fb_library_name)) + for hook in get_hooks(HOOK_API_LOADED): + hook(getattr(sys.modules[__name__], 'api')) + return getattr(sys.modules[__name__], 'api') # Exceptions required by Python Database API -class Warning(Exception): - """Exception raised for important warnings like data - truncations while inserting, etc.""" - pass - class Error(Exception): """Exception that is the base class of all other error exceptions. You can use this to catch all errors with one @@ -246,6 +284,9 @@ class TransactionConflict(DatabaseError): pass +class ParseError(Exception): + pass + # Named positional constants to be used as indices into the description # attribute of a cursor (these positions are defined by the DB API spec). # For example: @@ -267,6 +308,9 @@ def Time(hour, minite, second): return datetime.time(hour, minite, second) +def Timestamp(year, month, day, hour, minute, second): + return datetime.datetime(year, month, day, hour, minute, second) + def DateFromTicks(ticks): return apply(Date, time.localtime(ticks)[:3]) @@ -347,15 +391,15 @@ s = 'isc_info_db_impl_' q = [x for x in d if x.startswith(s) and x[len(s):] != 'last_value'] #: Dictionary to map Implementation codes to names -IMPLEMENTATION_NAMES = dict(zip([getattr(ibase,x) for x in q],[x[len(s):] for x in q])) +IMPLEMENTATION_NAMES = dict(zip([getattr(ibase, x) for x in q], [x[len(s):] for x in q])) s = 'isc_info_db_code_' q = [x for x in d if x.startswith(s) and x[len(s):] != 'last_value'] #: Dictionary to map provider codes to names -PROVIDER_NAMES = dict(zip([getattr(ibase,x) for x in q],[x[len(s):] for x in q])) +PROVIDER_NAMES = dict(zip([getattr(ibase, x) for x in q], [x[len(s):] for x in q])) s = 'isc_info_db_class_' q = [x for x in d if x.startswith(s) and x[len(s):] != 'last_value'] #: Dictionary to map database class codes to names -DB_CLASS_NAMES = dict(zip([getattr(ibase,x) for x in q],[x[len(s):] for x in q])) +DB_CLASS_NAMES = dict(zip([getattr(ibase, x) for x in q], [x[len(s):] for x in q])) # Private constants @@ -373,15 +417,15 @@ isc_info_allocation, isc_info_no_reserve, isc_info_db_sql_dialect, isc_info_ods_minor_version, isc_info_ods_version, isc_info_page_size, isc_info_current_memory, isc_info_forced_writes, isc_info_max_memory, - isc_info_num_buffers, isc_info_sweep_interval, isc_info_limbo, + isc_info_num_buffers, isc_info_sweep_interval, isc_info_attachment_id, isc_info_fetches, isc_info_marks, isc_info_reads, isc_info_writes, isc_info_set_page_buffers, isc_info_db_read_only, isc_info_db_size_in_pages, isc_info_page_errors, isc_info_record_errors, isc_info_bpage_errors, isc_info_dpage_errors, isc_info_ipage_errors, - isc_info_ppage_errors, isc_info_tpage_errors,frb_info_att_charset, + 
isc_info_ppage_errors, isc_info_tpage_errors, frb_info_att_charset, isc_info_oldest_transaction, isc_info_oldest_active, isc_info_oldest_snapshot, isc_info_next_transaction, - isc_info_active_tran_count,isc_info_db_class,isc_info_db_provider, + isc_info_active_tran_count, isc_info_db_class, isc_info_db_provider, ) _DATABASE_INFO_CODES_WITH_COUNT_RESULTS = ( isc_info_backout_count, isc_info_delete_count, isc_info_expunge_count, @@ -426,7 +470,8 @@ buf_pointer = ctypes.POINTER(ctypes.c_char) def is_dead_proxy(obj): - return isinstance(obj,weakref.ProxyType) and not dir(obj) + "Return True if object is a dead :func:`weakref.proxy`." + return isinstance(obj, weakref.ProxyType) and not dir(obj) def b2u(st, charset): "Decode to unicode if charset is defined. For conversion of result set data." @@ -445,9 +490,9 @@ def inc_pointer(pointer): t = type(pointer) - p = ctypes.cast(pointer,ctypes.c_void_p) + p = ctypes.cast(pointer, ctypes.c_void_p) p.value += 1 - return ctypes.cast(p,t) + return ctypes.cast(p, t) def bytes_to_bint(b): # Read as big endian len_b = len(b) @@ -543,104 +588,115 @@ error_code = status[1] msglist.append('- SQLCODE: %i' % sqlcode) - #isc_sql_interprete(sqlcode, msg, 512) - #if PYTHON_MAJOR_VER == 3: - #### Todo: trouble? decode from connection charset? - #msglist.append('- ' + (msg.value).decode('utf_8')) - #else: - #msglist.append('- ' + msg.value) - pvector = ctypes.cast(ctypes.addressof(status), ISC_STATUS_PTR) while True: result = api.fb_interpret(msg, 512, pvector) if result != 0: if PYTHON_MAJOR_VER == 3: - ### Todo: trouble? decode from connection charset? - msglist.append('- ' + (msg.value).decode('utf_8')) + msglist.append('- ' + (msg.value).decode(sys_encoding)) else: msglist.append('- ' + msg.value) else: break return error('\n'.join(msglist), sqlcode, error_code) -def build_dpb(user, password, sql_dialect, role, charset, buffers, force_write, - no_reserve, db_key_scope, no_gc, no_db_triggers, no_linger): - params = [int2byte(isc_dpb_version1)] - - def addString(codeAsByte, s): - if PYTHON_MAJOR_VER == 3 or isinstance(s,UnicodeType): - s = s.encode(charset_map.get(charset, charset)) - sLen = len(s) - if sLen >= 256: + +class ParameterBuffer(object): + """Helper class for construction of Database (and other) parameter + buffers. Parameters are stored in insertion order.""" + def __init__(self, charset): + self.items = [] + self.charset = charset + def add_parameter_code(self, code): + """Add parameter code to parameter buffer. + + :param code: Firebird code for the parameter + """ + self.items.append(struct.pack('c', int2byte(code))) + def add_string_parameter(self, code, value): + """Add string to parameter buffer. + + :param code: Firebird code for the parameter + :param string value: Parameter value + """ + if PYTHON_MAJOR_VER == 3 or isinstance(value, UnicodeType): + value = value.encode(charset_map.get(self.charset, self.charset)) + slen = len(value) + if slen >= 256: # Because the length is denoted in the DPB by a single byte. - raise ProgrammingError("Individual component of database" - " parameter buffer is too large. Components must be less" - " than 256 bytes." 
- ) - myformat = 'cc%ds' % sLen # like 'cc50s' for a 50-byte string - newEntry = struct.pack(myformat, int2byte(codeAsByte), - int2byte(sLen), s) - params.append(newEntry) - - def addByte(codeAsByte, value): - if (not isinstance(value, (int, mylong)) - or value < 0 or value > 255): - raise ProgrammingError("The value for an integer DPB code must be" - " an int or long with a value between 0 and 255." - ) - newEntry = struct.pack('ccc', int2byte(codeAsByte), - b('\x01'), int2byte(value)) - params.append(newEntry) - def addInt(codeAsByte, value): + raise ProgrammingError("""Too large parameter buffer component (>256 bytes).""") + self.items.append(struct.pack('cc%ds' % slen, int2byte(code), int2byte(slen), value)) + def add_byte_parameter(self, code, value): + """Add byte value to parameter buffer. + + :param code: Firebird code for the parameter + :param value: Parameter value (0-255) + """ + if not isinstance(value, (int, mylong)) or value < 0 or value > 255: + raise ProgrammingError("The value must be an int or long value between 0 and 255.") + self.items.append(struct.pack('ccc', int2byte(code), b('\x01'), int2byte(value))) + def add_integer_parameter(self, code, value): + """Add integer value to parameter buffer. + + :param code: Firebird code for the parameter + :param int value: Parameter value + """ if not isinstance(value, (int, mylong)): - raise ProgrammingError("The value for an integer DPB code must be" - " an int or long." - ) - newEntry = struct.pack('=ccI', int2byte(codeAsByte), - b('\x04'), value) - params.append(newEntry) - - if user: - addString(isc_dpb_user_name, user) - if password: - addString(isc_dpb_password, password) - if role: - addString(isc_dpb_sql_role_name, role) - if sql_dialect: - addByte(isc_dpb_sql_dialect, sql_dialect) - if charset: - addString(isc_dpb_lc_ctype, charset.upper()) - if buffers: - addInt(isc_dpb_num_buffers, buffers) - if force_write: - addByte(isc_dpb_force_write, force_write) - if no_reserve: - addByte(isc_dpb_no_reserve, no_reserve) - if db_key_scope: - addByte(isc_dpb_dbkey_scope, db_key_scope) - if no_gc: - addByte(isc_dpb_no_garbage_collect, no_gc) - if no_db_triggers: - addByte(isc_dpb_no_db_triggers, no_db_triggers) - if no_linger: - addByte(isc_dpb_nolinger, no_linger) - return b('').join(params) + raise ProgrammingError("The value for an integer DPB code must be an int or long.") + self.items.append(struct.pack('=ccI', int2byte(code), b('\x04'), value)) + def add_byte(self, byte): + """Add byte value to buffer. + + :param byte: Value to be added. + """ + self.items.append(struct.pack('b', byte)) + def add_word(self, word): + """Add two byte value to buffer. + + :param word: Value to be added. + """ + self.items.append(struct.pack('= 256: + # Because the length is denoted in the DPB by a single byte. + raise ProgrammingError("Individual component of" + " parameter buffer is too large. Components must be less" + " than 256 bytes.") + self.items.append(struct.pack('cc%ds' % slen, int2byte(slen), value)) + def get_buffer(self): + """Get parameter buffer content. + + :returns: Byte string with all inserted parameters. + """ + return b('').join(self.items) + def clear(self): + "Clear all parameters stored in parameter buffer." + self.items = [] + def get_length(self): + "Returns actual total length of parameter buffer." 
+ return sum((len(x) for x in self.items)) + + -def connect(dsn='', user=None, password=None, host=None, port=3050, database=None, +def connect(dsn='', user=None, password=None, host=None, port=None, database=None, sql_dialect=3, role=None, charset=None, buffers=None, force_write=None, no_reserve=None, db_key_scope=None, isolation_level=ISOLATION_LEVEL_READ_COMMITED, connection_class=None, fb_library_name=None, no_gc=None, no_db_triggers=None, no_linger=None): - """ - Establish a connection to database. + """Establish a connection to database. :param dsn: Connection string in format [host[/port]]:database :param string user: User name. If not specified, fdb attempts to use ISC_USER envar. :param string password: User password. If not specified, fdb attempts to use ISC_PASSWORD envar. :param string host: Server host machine specification. - :param integer port: Port used by Firebird server **(not used)**. + :param integer port: Port used by Firebird server. :param string database: Database specification (file spec. or alias) :param sql_dialect: SQL Dialect for connection. :type sql_dialect): 1, 2 or 3 @@ -662,25 +718,73 @@ :returns: Connection to database. :rtype: :class:`Connection` instance. - :raises ProgrammingError: For bad parameter values. - :raises DatabaseError: When connection cannot be established. + :raises `~fdb.ProgrammingError`: For bad parameter values. + :raises `~fdb.DatabaseError`: When connection cannot be established. .. important:: You may specify the database using either `dns` or `database` (with optional `host`), but not both. - Examples: + **Examples:** .. code-block:: python - con = fdb.connect(dsn='host:/path/database.fdb', user='sysdba', password='pass', charset='UTF8') - con = fdb.connect(host='myhost', database='/path/database.fdb', user='sysdba', password='pass', charset='UTF8') + con = fdb.connect(dsn='host:/path/database.fdb', user='sysdba', + password='pass', charset='UTF8') + con = fdb.connect(host='myhost', database='/path/database.fdb', + user='sysdba', password='pass', charset='UTF8') + + **Hooks:** + + Event `HOOK_DATABASE_ATTACH_REQUEST`: Executed after all parameters + are preprocessed and before :class:`Connection` is created. Hook + must have signature: hook_func(dsn, dpb) where `dpb` is + :class:`ParameterBuffer` instance. + + Hook may return :class:`Connection` (or subclass) instance or None. + First instance returned by any hook will become the return value + of this function and other hooks are not called. + + Event `HOOK_DATABASE_ATTACHED`: Executed before :class:`Connection` + (or subclass) instance is returned. Hook must have signature: + hook_func(connection). Any value returned by hook is ignored. 
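A sketch of both connect() hook events described above; the server path and credentials are placeholders, and the attach-request hook receives the ParameterBuffer built from the keyword arguments:

import fdb

def on_attach_request(dsn, dpb):
    # HOOK_DATABASE_ATTACH_REQUEST: dpb is a ParameterBuffer instance.
    print('attach requested, %d bytes of DPB' % dpb.get_length())
    return None  # let connect() perform the attachment itself

def on_attached(con):
    # HOOK_DATABASE_ATTACHED: runs just before connect() returns.
    print('database attached')

fdb.add_hook(fdb.HOOK_DATABASE_ATTACH_REQUEST, on_attach_request)
fdb.add_hook(fdb.HOOK_DATABASE_ATTACHED, on_attached)
con = fdb.connect(dsn='localhost:/path/database.fdb', user='sysdba', password='pass')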
""" + def build_dpb(user, password, sql_dialect, role, charset, buffers, + force_write, no_reserve, db_key_scope, no_gc, + no_db_triggers, no_linger): + dpb = ParameterBuffer(charset) + dpb.add_parameter_code(isc_dpb_version1) + if user: + dpb.add_string_parameter(isc_dpb_user_name, user) + if password: + dpb.add_string_parameter(isc_dpb_password, password) + if role: + dpb.add_string_parameter(isc_dpb_sql_role_name, role) + if sql_dialect: + dpb.add_byte_parameter(isc_dpb_sql_dialect, sql_dialect) + if charset: + dpb.add_string_parameter(isc_dpb_lc_ctype, charset.upper()) + if buffers: + dpb.add_integer_parameter(isc_dpb_num_buffers, buffers) + if force_write: + dpb.add_byte_parameter(isc_dpb_force_write, force_write) + if no_reserve: + dpb.add_byte_parameter(isc_dpb_no_reserve, no_reserve) + if db_key_scope: + dpb.add_byte_parameter(isc_dpb_dbkey_scope, db_key_scope) + if no_gc: + dpb.add_byte_parameter(isc_dpb_no_garbage_collect, no_gc) + if no_db_triggers: + dpb.add_byte_parameter(isc_dpb_no_db_triggers, no_db_triggers) + if no_linger: + dpb.add_byte_parameter(isc_dpb_nolinger, no_linger) + return dpb + load_api(fb_library_name) - if connection_class == None: + if connection_class is None: connection_class = Connection - if not issubclass(connection_class,Connection): + if not issubclass(connection_class, Connection): raise ProgrammingError("'connection_class' must be subclass of Connection") if not user: user = os.environ.get('ISC_USER', None) @@ -690,47 +794,69 @@ if sql_dialect not in [1, 2, 3]: raise ProgrammingError("SQl Dialect must be either 1, 2 or 3") - if ((not dsn and not host and not database) - or (dsn and (host or database)) - or (host and not database) - ): - raise ProgrammingError( - "Must supply one of:\n" - " 1. keyword argument dsn='host:/path/to/database'\n" - " 2. both keyword arguments host='host' and" - " database='/path/to/database'\n" - " 3. only keyword argument database='/path/to/database'" - ) + if ((not dsn and not host and not database) or + (dsn and (host or database)) or + (host and not database)): + raise ProgrammingError("Must supply one of:\n" + " 1. keyword argument dsn='host:/path/to/database'\n" + " 2. both keyword arguments host='host' and" + " database='/path/to/database'\n" + " 3. only keyword argument database='/path/to/database'") if not dsn: if host and host.endswith(':'): raise ProgrammingError("Host must not end with a colon." - " You should specify host='%s' rather than host='%s'." - % (host[:-1], host) - ) + " You should specify host='%s' rather than host='%s'." 
+ % (host[:-1], host)) elif host: - dsn = '%s:%s' % (host, database) + if port: + dsn = '%s/%d:%s' % (host, port, database) + else: + dsn = '%s:%s' % (host, database) else: - dsn = database + if port: + dsn = 'localhost/%d:%s' % (port, database) + else: + dsn = database - dsn = b(dsn,_FS_ENCODING) + dsn = b(dsn, _FS_ENCODING) if charset: charset = charset.upper() - dpb = build_dpb(user, password, sql_dialect, role, charset, buffers,force_write, + # + dpb = build_dpb(user, password, sql_dialect, role, charset, buffers, force_write, no_reserve, db_key_scope, no_gc, no_db_triggers, no_linger) + # + # Pre-attach hook + # + con = None + for hook in get_hooks(HOOK_DATABASE_ATTACH_REQUEST): + try: + con = hook(dsn, dpb) + except Exception as e: + raise ProgrammingError("Error in DATABASE_ATTACH_REQUEST hook.", *e.args) + if con is not None: + break + # + if con is None: - _isc_status = ISC_STATUS_ARRAY() - _db_handle = isc_db_handle(0) - - api.isc_attach_database(_isc_status, len(dsn), dsn, _db_handle, len(dpb), - dpb) - if db_api_error(_isc_status): - raise exception_from_status(DatabaseError, _isc_status, - "Error while connecting to database:") - - return connection_class(_db_handle, dpb, sql_dialect, charset, isolation_level) + _isc_status = ISC_STATUS_ARRAY() + _db_handle = isc_db_handle(0) + dpbuf = dpb.get_buffer() + api.isc_attach_database(_isc_status, len(dsn), dsn, _db_handle, + len(dpbuf), dpbuf) + if db_api_error(_isc_status): + raise exception_from_status(DatabaseError, _isc_status, + "Error while connecting to database:") + + con = connection_class(_db_handle, dpbuf, sql_dialect, + charset, isolation_level) + # + for hook in get_hooks(HOOK_DATABASE_ATTACHED): + hook(con) + # + return con def create_database(sql='', sql_dialect=3, dsn='', user=None, password=None, - host=None, port=3050, database=None, + host=None, port=None, database=None, page_size=None, length=None, charset=None, files=None, connection_class=None, fb_library_name=None): """ @@ -744,7 +870,7 @@ :param string user: User name. If not specified, fdb attempts to use ISC_USER envar. :param string password: User password. If not specified, fdb attempts to use ISC_PASSWORD envar. :param string host: Server host machine specification. - :param integer port: Port used by Firebird server **(not used)**. + :param integer port: Port used by Firebird server. :param string database: Database specification (file spec. or alias) :param integer page_size: Database page size. :param integer length: Database size in pages. @@ -757,20 +883,26 @@ :returns: Connection to the newly created database. :rtype: :class:`Connection` instance. - :raises ProgrammingError: For bad parameter values. - :raises DatabaseError: When database creation fails. + :raises `~fdb.ProgrammingError`: For bad parameter values. + :raises `~fdb.DatabaseError`: When database creation fails. - Example: + **Example:** .. code-block:: python con = fdb.create_database("create database '/temp/db.fdb' user 'sysdba' password 'pass'") con = fdb.create_database(dsn='/temp/db.fdb',user='sysdba',password='pass',page_size=8192) + + **Hooks:** + + Event` HOOK_DATABASE_ATTACHED`: Executed before :class:`Connection` + (or subclass) instance is returned. Hook must have signature: + hook_func(connection). Any value returned by hook is ignored. 
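Because the port keyword is now honoured, the dsn can be left to the driver; a hedged sketch with placeholder path, port and credentials:

con = fdb.create_database(host='localhost', port=3051,
                          database='/tmp/new.fdb', page_size=8192,
                          user='sysdba', password='pass')
con.drop_database()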
""" load_api(fb_library_name) - if connection_class == None: + if connection_class is None: connection_class = Connection - if not issubclass(connection_class,Connection): + if not issubclass(connection_class, Connection): raise ProgrammingError("'connection_class' must be subclass of Connection") # Database to create must be specified by either `sql` or other parameters. @@ -785,41 +917,42 @@ if sql_dialect not in [1, 2, 3]: raise ProgrammingError("SQl Dialect must be either 1, 2 or 3") - if ((not dsn and not host and not database) - or (dsn and (host or database)) - or (host and not database) - ): - raise ProgrammingError( - "Must supply one of:\n" - " 1. keyword argument dsn='host:/path/to/database'\n" - " 2. both keyword arguments host='host' and" - " database='/path/to/database'\n" - " 3. only keyword argument database='/path/to/database'" - ) + if ((not dsn and not host and not database) or + (dsn and (host or database)) or + (host and not database)): + raise ProgrammingError("Must supply one of:\n" + " 1. keyword argument dsn='host:/path/to/database'\n" + " 2. both keyword arguments host='host' and" + " database='/path/to/database'\n" + " 3. only keyword argument database='/path/to/database'") if not dsn: if host and host.endswith(':'): raise ProgrammingError("Host must not end with a colon." - " You should specify host='%s' rather than host='%s'." - % (host[:-1], host) - ) + " You should specify host='%s' rather than host='%s'." + % (host[:-1], host)) elif host: - dsn = '%s:%s' % (host, database) + if port: + dsn = '%s/%d:%s' % (host, port, database) + else: + dsn = '%s:%s' % (host, database) else: - dsn = database - - dsn = b(dsn,_FS_ENCODING) + if port: + dsn = 'localhost/%d:%s' % (port, database) + else: + dsn = database # Parameter checks - sql = "create database '%s' user '%s' password '%s'" % (dsn,user,password) + sql = "create database '%s' user '%s' password '%s'" % (dsn, user, password) if page_size: - sql = '%s page_size %i' % (sql,page_size) + sql = '%s page_size %i' % (sql, page_size) if length: - sql = '%s length %i' % (sql,length) + sql = '%s length %i' % (sql, length) if charset: - sql = '%s default character set %s' % (sql,charset.upper()) + sql = '%s default character set %s' % (sql, charset.upper()) if files: - sql = '%s %s' % (sql,files) + sql = '%s %s' % (sql, files) + sql = b(sql, _FS_ENCODING) isc_status = ISC_STATUS_ARRAY(0) trans_handle = isc_tr_handle(0) @@ -829,14 +962,16 @@ # For yet unknown reason, the isc_dsql_execute_immediate segfaults when # NULL (None) is passed as XSQLDA, so we provide one here api.isc_dsql_execute_immediate(isc_status, db_handle, trans_handle, - ctypes.c_ushort(len(sql)), sql, sql_dialect, - ctypes.cast(ctypes.pointer(xsqlda),XSQLDA_PTR)) + ctypes.c_ushort(len(sql)), sql, sql_dialect, + ctypes.cast(ctypes.pointer(xsqlda), XSQLDA_PTR)) if db_api_error(isc_status): raise exception_from_status(DatabaseError, isc_status, "Error while creating database:") - return connection_class(db_handle,sql_dialect=sql_dialect, charset=charset) - + con = connection_class(db_handle, sql_dialect=sql_dialect, charset=charset) + for hook in get_hooks(HOOK_DATABASE_ATTACHED): + hook(HOOK_DATABASE_ATTACHED, con) + return con class _cursor_weakref_callback(object): """Wraps callback function used in weakrefs so it's called only if still exists. @@ -857,6 +992,8 @@ func(*args, **kwargs) + + class TransactionContext(object): """Context Manager that manages transaction for object passed to constructor. 
@@ -874,14 +1011,14 @@ """ #: Transaction-like object this instance manages. transaction = None - def __init__(self,transaction): + def __init__(self, transaction): ":param transaction: Any object that supports `begin()`, `commit()` and `rollback()`." self.transaction = transaction def __enter__(self): self.transaction.begin() return self.transaction def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type == None: + if exc_type is None: self.transaction.commit() else: self.transaction.rollback() @@ -930,7 +1067,7 @@ # ReadOnly ReadCommitted transaction self._query_transaction = Transaction([self], default_tpb=ISOLATION_LEVEL_READ_COMMITED_RO) - self._transactions = [self._main_transaction,self._query_transaction] + self._transactions = [self._main_transaction, self._query_transaction] self.__precision_cache = {} self.__sqlsubtype_cache = {} self.__conduits = [] @@ -950,14 +1087,14 @@ verstr = self.db_info(isc_info_firebird_version) x = verstr.split() if x[0].find('V') > 0: - (x,self.__version) = x[0].split('V') + (x, self.__version) = x[0].split('V') elif x[0].find('T') > 0: - (x,self.__version) = x[0].split('T') + (x, self.__version) = x[0].split('T') else: # Unknown version self.__version = '0.0.0.0' x = self.__version.split('.') - self.__engine_version = float('%s.%s' % (x[0],x[1])) + self.__engine_version = float('%s.%s' % (x[0], x[1])) # self.__page_size = self.db_info(isc_info_page_size) def __remove_group(self, group_ref): @@ -970,7 +1107,7 @@ if self.group is not None: raise ProgrammingError(err_msg) def __check_attached(self): - if self._db_handle == None: + if self._db_handle is None: raise ProgrammingError("Connection object is detached from database") def __close(self, detach=True): if self._db_handle != None: @@ -990,6 +1127,10 @@ api.isc_detach_database(self._isc_status, self._db_handle) finally: self._db_handle = None + for hook in get_hooks(HOOK_DATABASE_CLOSED): + hook(self) + # + def __enter__(self): return self def __exit__(self, *args): @@ -1003,7 +1144,7 @@ def __get_transactions(self): return tuple(self._transactions) def __get_closed(self): - return self._db_handle == None + return self._db_handle is None def __get_server_version(self): return self.db_info(isc_info_version) def __get_firebird_version(self): @@ -1015,15 +1156,14 @@ def __get_default_tpb(self): return self._default_tpb def __set_default_tpb(self, value): - self._default_tpb = _validateTPB(value) + self._default_tpb = _validate_tpb(value) def __get_charset(self): return self.__charset def __set_charset(self, value): # More informative error message: raise AttributeError("A connection's 'charset' property can be" - " specified upon Connection creation as a keyword argument to" - " fdb.connect, but it cannot be modified thereafter." 
- ) + " specified upon Connection creation as a keyword argument to" + " fdb.connect, but it cannot be modified thereafter.") def __get_group(self): if self.__group: try: @@ -1034,7 +1174,7 @@ return None def __get_ods(self): if not self.__ods: - self.__ods = float('%d.%d' % (self.ods_version,self.ods_minor_version)) + self.__ods = float('%d.%d' % (self.ods_version, self.ods_minor_version)) return self.__ods def __get_ods_version(self): return self.db_info(isc_info_ods_version) @@ -1051,7 +1191,7 @@ def __get_attachment_id(self): return self.db_info(isc_info_attachment_id) def __get_io_stats(self): - return self.db_info([isc_info_reads,isc_info_writes,isc_info_fetches,isc_info_marks]) + return self.db_info([isc_info_reads, isc_info_writes, isc_info_fetches, isc_info_marks]) def __get_current_memory(self): return self.db_info(isc_info_current_memory) def __get_max_memory(self): @@ -1132,21 +1272,21 @@ "for databases with ODS 11.1 and higher.") return self.__monitor def _get_array_sqlsubtype(self, relation, column): - subtype = self.__sqlsubtype_cache.get((relation,column)) + subtype = self.__sqlsubtype_cache.get((relation, column)) if subtype is not None: return subtype self.__ic.execute("SELECT FIELD_SPEC.RDB$FIELD_SUB_TYPE" - " FROM RDB$FIELDS FIELD_SPEC, RDB$RELATION_FIELDS REL_FIELDS" - " WHERE" - " FIELD_SPEC.RDB$FIELD_NAME = REL_FIELDS.RDB$FIELD_SOURCE" - " AND REL_FIELDS.RDB$RELATION_NAME = ?" - " AND REL_FIELDS.RDB$FIELD_NAME = ?", - (p3fix(relation,self._python_charset), - p3fix(column,self._python_charset))) + " FROM RDB$FIELDS FIELD_SPEC, RDB$RELATION_FIELDS REL_FIELDS" + " WHERE" + " FIELD_SPEC.RDB$FIELD_NAME = REL_FIELDS.RDB$FIELD_SOURCE" + " AND REL_FIELDS.RDB$RELATION_NAME = ?" + " AND REL_FIELDS.RDB$FIELD_NAME = ?", + (p3fix(relation, self._python_charset), + p3fix(column, self._python_charset))) result = self.__ic.fetchone() self.__ic.close() if result: - self.__sqlsubtype_cache[(relation,column)] = result[0] + self.__sqlsubtype_cache[(relation, column)] = result[0] return result[0] def _determine_field_precision(self, sqlvar): if sqlvar.relname_length == 0 or sqlvar.sqlname_length == 0: @@ -1156,7 +1296,7 @@ return 0 # Special case for automatic RDB$DB_KEY fields. if ((sqlvar.sqlname_length == 6 and sqlvar.sqlname == 'DB_KEY') or - (sqlvar.sqlname_length == 10 and sqlvar.sqlname == 'RDB$DB_KEY')): + (sqlvar.sqlname_length == 10 and sqlvar.sqlname == 'RDB$DB_KEY')): return 0 precision = self.__precision_cache.get((sqlvar.relname, sqlvar.sqlname)) @@ -1164,36 +1304,36 @@ return precision # First, try table self.__ic.execute("SELECT FIELD_SPEC.RDB$FIELD_PRECISION" - " FROM RDB$FIELDS FIELD_SPEC," - " RDB$RELATION_FIELDS REL_FIELDS" - " WHERE" - " FIELD_SPEC.RDB$FIELD_NAME =" - " REL_FIELDS.RDB$FIELD_SOURCE" - " AND REL_FIELDS.RDB$RELATION_NAME = ?" - " AND REL_FIELDS.RDB$FIELD_NAME = ?", - (p3fix(sqlvar.relname,self._python_charset), - p3fix(sqlvar.sqlname,self._python_charset))) + " FROM RDB$FIELDS FIELD_SPEC," + " RDB$RELATION_FIELDS REL_FIELDS" + " WHERE" + " FIELD_SPEC.RDB$FIELD_NAME =" + " REL_FIELDS.RDB$FIELD_SOURCE" + " AND REL_FIELDS.RDB$RELATION_NAME = ?" 
+ " AND REL_FIELDS.RDB$FIELD_NAME = ?", + (p3fix(sqlvar.relname, self._python_charset), + p3fix(sqlvar.sqlname, self._python_charset))) result = self.__ic.fetchone() self.__ic.close() if result: - self.__precision_cache[(sqlvar.relname,sqlvar.sqlname)] = result[0] + self.__precision_cache[(sqlvar.relname, sqlvar.sqlname)] = result[0] return result[0] # Next, try stored procedure output parameter self.__ic.execute("SELECT FIELD_SPEC.RDB$FIELD_PRECISION" - " FROM RDB$FIELDS FIELD_SPEC," - " RDB$PROCEDURE_PARAMETERS REL_FIELDS" - " WHERE" - " FIELD_SPEC.RDB$FIELD_NAME =" - " REL_FIELDS.RDB$FIELD_SOURCE" - " AND RDB$PROCEDURE_NAME = ?" - " AND RDB$PARAMETER_NAME = ?" - " AND RDB$PARAMETER_TYPE = 1", - (p3fix(sqlvar.relname,self._python_charset), - p3fix(sqlvar.sqlname,self._python_charset))) + " FROM RDB$FIELDS FIELD_SPEC," + " RDB$PROCEDURE_PARAMETERS REL_FIELDS" + " WHERE" + " FIELD_SPEC.RDB$FIELD_NAME =" + " REL_FIELDS.RDB$FIELD_SOURCE" + " AND RDB$PROCEDURE_NAME = ?" + " AND RDB$PARAMETER_NAME = ?" + " AND RDB$PARAMETER_TYPE = 1", + (p3fix(sqlvar.relname, self._python_charset), + p3fix(sqlvar.sqlname, self._python_charset))) result = self.__ic.fetchone() self.__ic.close() if result: - self.__precision_cache[(sqlvar.relname,sqlvar.sqlname)] = result[0] + self.__precision_cache[(sqlvar.relname, sqlvar.sqlname)] = result[0] return result[0] # We ran out of options return 0 @@ -1203,11 +1343,11 @@ Unlike plain file deletion, this method behaves responsibly, in that it removes shadow files and other ancillary files for this database. - :raises ProgrammingError: When connection is a member of a :class:`ConnectionGroup`. - :raises DatabaseError: When error is returned from server. + :raises `~fdb.ProgrammingError`: When connection is a member of a :class:`ConnectionGroup`. + :raises `~fdb.DatabaseError`: When error is returned from server. """ self.__ensure_group_membership(False, "Cannot drop database via" - " connection that is part of a ConnectionGroup.") + " connection that is part of a ConnectionGroup.") saved_handle = isc_db_handle(self._db_handle.value) self.__close(detach=False) api.isc_drop_database(self._isc_status, saved_handle) @@ -1233,12 +1373,12 @@ :param string sql: SQL statement to execute. - :raises ProgrammingError: When connection is closed. - :raises DatabaseError: When error is returned from server. + :raises `~fdb.ProgrammingError`: When connection is closed. + :raises `~fdb.DatabaseError`: When error is returned from server. """ self.__check_attached() self.main_transaction.execute_immediate(sql) - def database_info(self, info_code, result_type, page_number = None): + def database_info(self, info_code, result_type, page_number=None): """Wraps the Firebird C API function `isc_database_info`. For documentation, see the IB 6 API Guide section entitled @@ -1256,14 +1396,14 @@ all possible isc_info_* items. :param integer info_code: One of the `isc_info_*` constants. - :param string result_type: Must be either ‘s’ if you expect a string result, + :param string result_type: Must be either ‘b’ if you expect a binary string result, or ‘i’ if you expect an integer result. :param integer page_number: Page number for `fb_info_page_contents` info code. - :raises DatabaseError: When error is returned from server. - :raises OperationalError: When returned information is bigger than SHRT_MAX. - :raises InternalError: On unexpected processing condition. - :raises ValueError: On illegal `result_type` value. + :raises `~fdb.DatabaseError`: When error is returned from server. 
+ :raises `~fdb.OperationalError`: When returned information is bigger than SHRT_MAX. + :raises `~fdb.InternalError`: On unexpected processing condition. + :raises `ValueError`: On illegal `result_type` value. .. seealso:: Extracting data with the database_info function is rather clumsy. See :meth:`db_info` for higher-level means of accessing the @@ -1277,16 +1417,16 @@ buf_size = 256 if info_code != fb_info_page_contents else self.page_size + 10 request_buffer = bs([info_code]) if info_code == fb_info_page_contents: - request_buffer += int_to_bytes(2, 2) + request_buffer += int_to_bytes(4, 2) request_buffer += int_to_bytes(page_number, 4) while True: res_buf = int2byte(0) * buf_size api.isc_database_info(self._isc_status, self._db_handle, - len(request_buffer), request_buffer, - len(res_buf), res_buf) + len(request_buffer), request_buffer, + len(res_buf), res_buf) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, - "Error while requesting database information:") + "Error while requesting database information:") i = buf_size - 1 while i >= 0: if res_buf[i] != mychr(0): @@ -1301,8 +1441,8 @@ continue else: raise OperationalError("Result is too large to fit into" - " buffer of size SHRT_MAX, yet underlying info " - " function only accepts buffers with size <= SHRT_MAX.") + " buffer of size SHRT_MAX, yet underlying info " + " function only accepts buffers with size <= SHRT_MAX.") else: break if ord2(res_buf[i]) != isc_info_end: @@ -1314,17 +1454,18 @@ raise InternalError("Result code does not match request code.") if result_type.upper() == 'I': return bytes_to_int(res_buf[3:3 + bytes_to_int(res_buf[1:3])]) - elif (result_type.upper() == 'S' + elif (result_type.upper() == 'B' and info_code in _DATABASE_INFO__KNOWN_LOW_LEVEL_EXCEPTIONS): # The result buffers for a few request codes don't follow the generic # conventions, so we need to return their full contents rather than # omitting the initial infrastructural bytes. return ctypes.string_at(res_buf, i) - elif result_type.upper() == 'S': + #elif result_type.upper() == 'S': + #return ctypes.string_at(res_buf[3:], bytes_to_int(res_buf[1:3])) + elif result_type.upper() == 'B': return ctypes.string_at(res_buf[3:], bytes_to_int(res_buf[1:3])) else: - raise ValueError("Unknown result type requested " - "(must be 'i' or 's').") + raise ValueError("Unknown result type requested (must be 'i', 'b' or 's').") def db_info(self, request): """ Higher-level convenience wrapper around the :meth:`database_info` method @@ -1334,112 +1475,79 @@ :param request: Single `fdb.isc_info_*` info request code or a sequence of such codes. :returns: Mapping of (info request code -> result). - :raises ValueError: When requested code is not recognized. - :raises OperationalError: On unexpected processing condition. + :raises `ValueError`: When requested code is not recognized. + :raises `~fdb.OperationalError`: On unexpected processing condition. """ - def _extractDatabaseInfoCounts(buf): + def _extract_database_info_counts(buf): # Extract a raw binary sequence # of (unsigned short, signed int) pairs into # a corresponding Python dictionary. 
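The cluster layout that helper parses, shown as an illustrative struct round-trip (the info codes and counts are made up):

import struct

# Two (unsigned short code, signed int count) pairs, little-endian, unpadded.
buf = struct.pack('<Hi', 23, 7) + struct.pack('<Hi', 24, 0)
pair_size = struct.calcsize('<Hi')  # 6 bytes
counts = dict(struct.unpack_from('<Hi', buf, off)
              for off in range(0, len(buf), pair_size))
assert counts == {23: 7, 24: 0}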
- uShortSize = struct.calcsize(' 0: - if PYTHON_MAJOR_VER == 3: - slen = struct.unpack('B', int2byte(buf[pos]))[0] - else: - slen = struct.unpack('B', buf[pos])[0] + slen = unpack_num(buf, pos) pos += 1 - item = buf[pos:pos + slen] pos += slen - items.append(p3fix(item,self._python_charset)) + items.append(p3fix(item, self._python_charset)) count -= 1 - - results[infoCode] = tuple(items) - elif infoCode == isc_info_implementation: - # (IB 6 API Guide page 52) - buf = self.database_info(infoCode, 's') - # Skip the first four bytes. + results[info_code] = tuple(items) + elif info_code == fb_info_implementation: + buf = self.database_info(info_code, 'b') pos = 1 - - if PYTHON_MAJOR_VER == 3: - implNumber = struct.unpack('B', int2byte(buf[pos]))[0] - else: - implNumber = struct.unpack('B', buf[pos])[0] + cpu_id = unpack_num(buf, pos) pos += 1 - - if PYTHON_MAJOR_VER == 3: - classNumber = struct.unpack('B', int2byte(buf[pos]))[0] - else: - classNumber = struct.unpack('B', buf[pos])[0] + os_id = unpack_num(buf, pos) pos += 1 - - results[infoCode] = (implNumber, classNumber) - elif infoCode in (isc_info_version, isc_info_firebird_version): - # (IB 6 API Guide page 53) - buf = self.database_info(infoCode, 's') - # Skip the first byte. + compiler_id = unpack_num(buf, pos) + pos += 1 + flags = unpack_num(buf, pos) + pos += 1 + class_number = unpack_num(buf, pos) + results[info_code] = (cpu_id, os_id, compiler_id, flags, class_number) + elif info_code == isc_info_implementation: + buf = self.database_info(info_code, 'b') pos = 1 - - if PYTHON_MAJOR_VER == 3: - versionStringLen = (struct.unpack('B', - int2byte(buf[pos]))[0]) - else: - versionStringLen = (struct.unpack('B', buf[pos])[0]) + impl_number = unpack_num(buf, pos) pos += 1 - - versionString = buf[pos:pos + versionStringLen] - - results[infoCode] = p3fix(versionString,self._python_charset) - elif infoCode == isc_info_user_names: - # (IB 6 API Guide page 54) - # + class_number = unpack_num(buf, pos) + results[info_code] = (impl_number, class_number) + elif info_code in (isc_info_version, isc_info_firebird_version): + buf = self.database_info(info_code, 'b') + pos = 1 + version_string_len = unpack_num(buf, pos) + pos += 1 + results[info_code] = p3fix(buf[pos:pos + version_string_len], self._python_charset) + elif info_code == isc_info_user_names: # The isc_info_user_names results buffer does not exactly match # the format declared on page 54 of the IB 6 API Guide. # The buffer is formatted as a sequence of clusters, each of @@ -1453,8 +1561,7 @@ # cluster declared on page 51 while also [trying, but failing # to] adhere to the isc_info_user_names-specific format # declared on page 54. - buf = self.database_info(infoCode, 's') - + buf = self.database_info(info_code, 'b') usernames = [] pos = 0 while pos < len(buf): @@ -1462,72 +1569,62 @@ if PYTHON_MAJOR_VER == 3: if buf[pos] != isc_info_user_names: raise OperationalError('While trying to service' - ' isc_info_user_names request, found unexpected' - ' results buffer contents at position %d of [%s]' - % (pos, buf) - ) + ' isc_info_user_names request, found unexpected' + ' results buffer contents at position %d of [%s]' + % (pos, buf)) pos += 1 - # The two-byte cluster length: - nameClusterLen = (struct.unpack(' number of connections by that user. 
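db_info() drives the parsing above and hands back ready-made Python values; a sketch, reusing `con` from the first example:

    info = con.db_info([fdb.isc_info_page_size, fdb.isc_info_user_names])
    info[fdb.isc_info_page_size]   # e.g. 4096
    info[fdb.isc_info_user_names]  # e.g. {'SYSDBA': 1}, user name -> connection count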
res = {} for un in usernames: res[un] = res.get(un, 0) + 1 - results[infoCode] = res - elif infoCode == isc_info_active_transactions: - buf = self.database_info(infoCode, 's') + results[info_code] = res + elif info_code in (isc_info_active_transactions, isc_info_limbo): + buf = self.database_info(info_code, 'b') transactions = [] - uShortSize = struct.calcsize('= 0 or attr.startswith('_') - or attr in ['close','bind'] or hasattr(self,attr)): - val = getattr(self.__schema,attr) + or attr in ['close', 'bind'] or hasattr(self, attr)): + val = getattr(self.__schema, attr) if callable(val): - setattr(self,attr,val) + setattr(self, attr, val) def _get_schema(self): return self.__schema @@ -1897,7 +1973,7 @@ event_buf = None #: Result buffer result_buf = None - def __init__(self,queue,db_handle,event_names): + def __init__(self, queue, db_handle, event_names): self.__first = True def callback(result, length, updated): ctypes.memmove(result, updated, length) @@ -1919,17 +1995,17 @@ self.event_id = ISC_LONG(0) self.buf_length = api.isc_event_block(ctypes.pointer(self.event_buf), - ctypes.pointer(self.result_buf), - *[b(x) for x in event_names]) + ctypes.pointer(self.result_buf), + *[b(x) for x in event_names]) def _begin(self): self.__wait_for_events() - def __lt__(self,other): + def __lt__(self, other): return self.event_id.value < other.event_id.value def __wait_for_events(self): - api.isc_que_events(self._isc_status,self._db_handle,self.event_id, - self.buf_length,self.event_buf, - self.__callback,self.result_buf) + api.isc_que_events(self._isc_status, self._db_handle, self.event_id, + self.buf_length, self.event_buf, + self.__callback, self.result_buf) if db_api_error(self._isc_status): self.close() raise exception_from_status(DatabaseError, self._isc_status, @@ -1938,7 +2014,7 @@ "Count event occurences and reregister interest in futrther notifications." result = {} api.isc_event_counts(self.__results, self.buf_length, - self.event_buf, self.result_buf) + self.event_buf, self.result_buf) if self.__first: # Ignore the first call, it's for setting up the table self.__first = False @@ -1952,7 +2028,7 @@ def close(self): "Close this block canceling managed events." if not self.closed: - api.isc_cancel_events(self._isc_status,self._db_handle,self.event_id) + api.isc_cancel_events(self._isc_status, self._db_handle, self.event_id) self.__closed = True del self.__callback if db_api_error(self._isc_status): @@ -1993,7 +2069,7 @@ events = conduit.wait() process_events(events) """ - def __init__(self,db_handle,event_names): + def __init__(self, db_handle, event_names): """ :param db_handle: Database handle. :param event_names: List of strings that represent event names. @@ -2001,7 +2077,7 @@ self._db_handle = db_handle self._isc_status = ISC_STATUS_ARRAY(0) self.__event_names = list(event_names) - self.__events = {}.fromkeys(self.__event_names,0) + self.__events = {}.fromkeys(self.__event_names, 0) self.__event_blocks = [] self.__closed = False self.__queue = ibase.PriorityQueue() @@ -2024,7 +2100,7 @@ if operation == ibase.OP_RECORD_AND_REREGISTER: events = data.count_and_reregister() if events: - for key,value in events.items(): + for key, value in events.items(): self.__events[key] += value self.__events_ready.set() elif operation == ibase.OP_DIE: @@ -2040,7 +2116,7 @@ self.__event_blocks.append(event_block) event_block._begin() - def wait(self,timeout=None): + def wait(self, timeout=None): """Wait for events. 
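The event machinery above is driven through a conduit; a minimal sketch, with the event name an assumption:

    conduit = con.event_conduit(['order_created'])
    conduit.begin()                     # required before wait(), see the check below
    events = conduit.wait(timeout=10)   # e.g. {'order_created': 1}
    conduit.close()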
Blocks the calling thread until at least one of the events occurs, or @@ -2070,7 +2146,8 @@ at all. """ if not self.__initialized: - raise ProgrammingError("Event collection not initialized. It's necessary to call begin().") + raise ProgrammingError("Event collection not initialized. " + "It's necessary to call begin().") if not self.closed: self.__events_ready.wait(timeout) return self.__events.copy() @@ -2080,7 +2157,7 @@ """ if not self.closed: self.__events_ready.clear() - self.__events = {}.fromkeys(self.__event_names,0) + self.__events = {}.fromkeys(self.__event_names, 0) def close(self): """Cancels the standing request for this conduit to be notified of events. @@ -2088,7 +2165,7 @@ and should be discarded. """ if not self.closed: - self.__queue.put((ibase.OP_DIE,self)) + self.__queue.put((ibase.OP_DIE, self)) self.__process_thread.join() for block in self.__event_blocks: block.close() @@ -2148,6 +2225,7 @@ # type constants in the isc_info_sql_stmt_* series. self.statement_type = None self.__streamed_blobs = [] + self.__streamed_blob_treshold = 65536 self.__blob_readers = [] self.__executed = False self.__prepared = False @@ -2162,53 +2240,42 @@ # allocate statement handle self._stmt_handle = isc_stmt_handle(0) - api.isc_dsql_allocate_statement(self._isc_status, - connection._db_handle, - self._stmt_handle) + api.isc_dsql_allocate_statement(self._isc_status, connection._db_handle, self._stmt_handle) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, "Error while allocating SQL statement:") # prepare statement - op = b(operation,self.__python_charset) - api.isc_dsql_prepare(self._isc_status, - self.cursor._transaction._tr_handle, - self._stmt_handle, - len(op),op, - self.__sql_dialect, - ctypes.cast(ctypes.pointer(self._out_sqlda), - XSQLDA_PTR)) + op = b(operation, self.__python_charset) + api.isc_dsql_prepare(self._isc_status, self.cursor._transaction._tr_handle, + self._stmt_handle, len(op), op, self.__sql_dialect, + ctypes.cast(ctypes.pointer(self._out_sqlda), XSQLDA_PTR)) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, - "Error while preparing SQL statement:") + "Error while preparing SQL statement:") # Determine statement type info = b(' ') * 20 - api.isc_dsql_sql_info(self._isc_status, self._stmt_handle, 1, - bs([isc_info_sql_stmt_type]), - len(info), info) + api.isc_dsql_sql_info(self._isc_status, self._stmt_handle, 1, bs([isc_info_sql_stmt_type]), + len(info), info) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, - "Error while determining SQL statement type:") + "Error while determining SQL statement type:") if ord2(info[0]) != isc_info_sql_stmt_type: raise InternalError("Cursor.execute, determine statement type:\n" - "first byte must be 'isc_info_sql_stmt_type'") + "first byte must be 'isc_info_sql_stmt_type'") self.statement_type = bytes_to_int(info[3:3 + bytes_to_int(info[1:3])]) # Init XSQLDA for input parameters - api.isc_dsql_describe_bind(self._isc_status, self._stmt_handle, - self.__sql_dialect, - ctypes.cast(ctypes.pointer(self._in_sqlda), - XSQLDA_PTR)) + api.isc_dsql_describe_bind(self._isc_status, self._stmt_handle, self.__sql_dialect, + ctypes.cast(ctypes.pointer(self._in_sqlda), XSQLDA_PTR)) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, - "Error while determining SQL statement parameters:") + "Error while determining SQL statement parameters:") if self._in_sqlda.sqld > 
self._in_sqlda.sqln: self._in_sqlda = xsqlda_factory(self._in_sqlda.sqld) - api.isc_dsql_describe_bind(self._isc_status, self._stmt_handle, - self.__sql_dialect, - ctypes.cast(ctypes.pointer(self._in_sqlda), - XSQLDA_PTR)) + api.isc_dsql_describe_bind(self._isc_status, self._stmt_handle, self.__sql_dialect, + ctypes.cast(ctypes.pointer(self._in_sqlda), XSQLDA_PTR)) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, - "Error while determining SQL statement parameters:") + "Error while determining SQL statement parameters:") # The number of input parameters the statement requires. self.n_input_params = self._in_sqlda.sqld # record original type and size information so it can be restored for @@ -2216,32 +2283,28 @@ for sqlvar in self._in_sqlda.sqlvar[:self.n_input_params]: self._in_sqlda_save.append((sqlvar.sqltype, sqlvar.sqllen)) # Init output XSQLDA - api.isc_dsql_describe(self._isc_status, self._stmt_handle, - self.__sql_dialect, - ctypes.cast(ctypes.pointer(self._out_sqlda), - XSQLDA_PTR)) + api.isc_dsql_describe(self._isc_status, self._stmt_handle, self.__sql_dialect, + ctypes.cast(ctypes.pointer(self._out_sqlda), XSQLDA_PTR)) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, - "Error while determining SQL statement output:") + "Error while determining SQL statement output:") if self._out_sqlda.sqld > self._out_sqlda.sqln: self._out_sqlda = xsqlda_factory(self._out_sqlda.sqld) - api.isc_dsql_describe(self._isc_status, self._stmt_handle, - self.__sql_dialect, - ctypes.cast(ctypes.pointer(self._out_sqlda), - XSQLDA_PTR)) + api.isc_dsql_describe(self._isc_status, self._stmt_handle, self.__sql_dialect, + ctypes.cast(ctypes.pointer(self._out_sqlda), XSQLDA_PTR)) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, - "Error while determining SQL statement output:") + "Error while determining SQL statement output:") # The number of output fields the statement produces. 
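The allocate/prepare/describe sequence above runs inside prep(); a typical round trip, assuming the employee example database's COUNTRY table:

    ps = cur.prep("select COUNTRY, CURRENCY from COUNTRY where COUNTRY = ?")
    cur.execute(ps, ('USA',))
    print(cur.fetchone())   # e.g. ('USA', 'Dollar')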
self.n_output_params = self._out_sqlda.sqld - self.__coerce_XSQLDA(self._out_sqlda) + self.__coerce_xsqlda(self._out_sqlda) self.__prepared = True self._name = None - def __cursor_deleted(self,obj): + def __cursor_deleted(self, obj): self.cursor = None def __get_name(self): return self._name - def __set_name(self,name): + def __set_name(self, name): if self._name: raise ProgrammingError("Cursor's name has already been declared") self._set_cursor_name(name) @@ -2252,8 +2315,7 @@ while True: info = b(' ') * buf_size api.isc_dsql_sql_info(self._isc_status, self._stmt_handle, 2, - bs([isc_info_sql_get_plan,isc_info_end]), - len(info), info) + bs([isc_info_sql_get_plan, isc_info_end]), len(info), info) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, "Error while determining rowcount:") @@ -2277,19 +2339,15 @@ ### Todo: Better handling of P version specifics result = ctypes.string_at(info[_SIZE_OF_SHORT + 2:], size - 1) if PYTHON_MAJOR_VER == 3: - return b2u(result,self.__python_charset) + return b2u(result, self.__python_charset) #return result.decode(charset_map.get(self.__charset,self.__charset)) else: return result def __get_sql(self): return self.__sql def __is_fixed_point(self, dialect, data_type, subtype, scale): - return ((data_type in [SQL_SHORT, SQL_LONG, SQL_INT64] - and (subtype or scale) - ) - or ((dialect < 3) and scale and - (data_type in [SQL_DOUBLE, SQL_D_FLOAT])) - ) + return ((data_type in [SQL_SHORT, SQL_LONG, SQL_INT64] and (subtype or scale)) or + ((dialect < 3) and scale and (data_type in [SQL_DOUBLE, SQL_D_FLOAT]))) def __get_external_data_type_name(self, dialect, data_type, subtype, scale): if data_type == SQL_TEXT: @@ -2371,7 +2429,13 @@ precision = 0 if vartype in [SQL_TEXT, SQL_VARYING]: vtype = StringType - dispsize = sqlvar.sqllen + # CHAR with multibyte encoding requires special handling + if sqlvar.sqlsubtype in (4, 69): # UTF8 and GB18030 + dispsize = sqlvar.sqllen // 4 + elif sqlvar.sqlsubtype == 3: # UNICODE_FSS + dispsize = sqlvar.sqllen // 3 + else: + dispsize = sqlvar.sqllen elif (vartype in [SQL_SHORT, SQL_LONG, SQL_INT64] and (sqlvar.sqlsubtype or scale)): @@ -2426,16 +2490,11 @@ return self.__description def __get_rowcount(self): result = -1 - if (self.__executed and - self.statement_type in [isc_info_sql_stmt_select, - isc_info_sql_stmt_insert, - isc_info_sql_stmt_update, - isc_info_sql_stmt_delete]): + if (self.__executed and self.statement_type in [isc_info_sql_stmt_select, isc_info_sql_stmt_insert, + isc_info_sql_stmt_update, isc_info_sql_stmt_delete]): info = b(' ') * 64 api.isc_dsql_sql_info(self._isc_status, self._stmt_handle, 2, - bs([isc_info_sql_records, - isc_info_end]), - len(info), info) + bs([isc_info_sql_records, isc_info_end]), len(info), info) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, "Error while determining rowcount:") @@ -2450,15 +2509,10 @@ size = bytes_to_uint(info[res_walk:res_walk + short_size]) res_walk += short_size count = bytes_to_uint(info[res_walk:res_walk + size]) - if ((cur_count_type == isc_info_req_select_count - and self.statement_type == isc_info_sql_stmt_select) - or (cur_count_type == isc_info_req_insert_count - and self.statement_type == isc_info_sql_stmt_insert) - or (cur_count_type == isc_info_req_update_count - and self.statement_type == isc_info_sql_stmt_update) - or (cur_count_type == isc_info_req_delete_count - and self.statement_type == isc_info_sql_stmt_delete) - ): + if ((cur_count_type == 
isc_info_req_select_count and self.statement_type == isc_info_sql_stmt_select) + or (cur_count_type == isc_info_req_insert_count and self.statement_type == isc_info_sql_stmt_insert) + or (cur_count_type == isc_info_req_update_count and self.statement_type == isc_info_sql_stmt_update) + or (cur_count_type == isc_info_req_delete_count and self.statement_type == isc_info_sql_stmt_delete)): result = count res_walk += size return result @@ -2536,7 +2590,7 @@ self.__get_internal_data_type_name(data_type), str(vmin), str(vmax)) raise ProgrammingError(msg, -802) - def __coerce_XSQLDA(self, xsqlda): + def __coerce_xsqlda(self, xsqlda): """Allocate space for SQLVAR data. """ for sqlvar in xsqlda.sqlvar[:self._out_sqlda.sqld]: @@ -2547,43 +2601,43 @@ sqlvar.sqldata = ctypes.create_string_buffer(sqlvar.sqllen + 2) elif vartype == SQL_SHORT: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) elif vartype == SQL_LONG: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) elif vartype == SQL_INT64: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) elif vartype == SQL_FLOAT: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) elif vartype == SQL_DOUBLE: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) elif vartype == SQL_D_FLOAT: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) elif vartype == SQL_BLOB: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) elif vartype == SQL_TIMESTAMP: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) elif vartype == SQL_TYPE_DATE: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) elif vartype == SQL_TYPE_TIME: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) elif vartype == SQL_ARRAY: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) elif vartype == SQL_BOOLEAN: sqlvar.sqldata = ctypes.cast(ctypes.create_string_buffer( - sqlvar.sqllen),buf_pointer) + sqlvar.sqllen), buf_pointer) else: pass - def __XSQLDA2Tuple(self, xsqlda): + def __xsqlda2tuple(self, xsqlda): """Move data from output XSQLDA to result tuple. 
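Only the counter that matches the statement type is reported by the rowcount logic above; a sketch, continuing with the assumed COUNTRY table:

    cur.execute("update COUNTRY set CURRENCY = CURRENCY where COUNTRY = ?", ('USA',))
    print(cur.rowcount)   # 1, taken from isc_info_req_update_count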
""" values = [] @@ -2596,12 +2650,12 @@ and sqlvar.sqlind.contents.value == -1): value = None elif vartype == SQL_TEXT: - value = ctypes.string_at(sqlvar.sqldata,sqlvar.sqllen) + value = ctypes.string_at(sqlvar.sqldata, sqlvar.sqllen) #value = sqlvar.sqldata[:sqlvar.sqllen] ### Todo: verify handling of P version differences - if ((self.__charset or PYTHON_MAJOR_VER == 3) - and sqlvar.sqlsubtype != 1): # non OCTETS - value = b2u(value,self.__python_charset) + if ((self.__charset or PYTHON_MAJOR_VER == 3) and + sqlvar.sqlsubtype != 1): # non OCTETS + value = b2u(value, self.__python_charset) # CHAR with multibyte encoding requires special handling if sqlvar.sqlsubtype in (4, 69): # UTF8 and GB18030 reallength = sqlvar.sqllen // 4 @@ -2618,15 +2672,15 @@ value = bytes(sqlvar.sqldata[2:2 + size]) else: value = str(sqlvar.sqldata[2:2 + size]) - if ((self.__charset or PYTHON_MAJOR_VER == 3) - and sqlvar.sqlsubtype != 1): # non OCTETS - value = b2u(value,self.__python_charset) + if ((self.__charset or PYTHON_MAJOR_VER == 3) and + sqlvar.sqlsubtype != 1): # non OCTETS + value = b2u(value, self.__python_charset) elif vartype == SQL_BOOLEAN: value = bool(bytes_to_int(sqlvar.sqldata.contents.value)) elif vartype in [SQL_SHORT, SQL_LONG, SQL_INT64]: value = bytes_to_int(sqlvar.sqldata[:sqlvar.sqllen]) # It's scalled integer? - if (sqlvar.sqlsubtype or scale): + if sqlvar.sqlsubtype or scale: value = decimal.Decimal(value) / _tenTo[abs(scale)] elif vartype == SQL_TYPE_DATE: yyyy, mm, dd = self._parse_date(sqlvar.sqldata[:sqlvar.sqllen]) @@ -2642,10 +2696,12 @@ value = struct.unpack('f', sqlvar.sqldata[:sqlvar.sqllen])[0] elif vartype == SQL_DOUBLE: value = struct.unpack('d', sqlvar.sqldata[:sqlvar.sqllen])[0] + elif vartype == SQL_BOOLEAN: + value = bytes_to_int(sqlvar.sqldata[:sqlvar.sqllen]) + value = value == 1 elif vartype == SQL_BLOB: val = sqlvar.sqldata[:sqlvar.sqllen] - blobid = ISC_QUAD(bytes_to_uint(val[:4]), - bytes_to_uint(val[4:sqlvar.sqllen])) + blobid = ISC_QUAD(bytes_to_uint(val[:4]), bytes_to_uint(val[4:sqlvar.sqllen])) # Check if stream BLOB is requested instead materialized one use_stream = False if self.__streamed_blobs: @@ -2660,7 +2716,7 @@ use_stream = True if use_stream: # Stream BLOB - value = BlobReader(blobid,self.cursor._connection._db_handle, + value = BlobReader(blobid, self.cursor._connection._db_handle, self.cursor._transaction._tr_handle, sqlvar.sqlsubtype == 1, self.__charset) @@ -2668,10 +2724,9 @@ else: # Materialized BLOB blob_handle = isc_blob_handle() - api.isc_open_blob2(self._isc_status, - self.cursor._connection._db_handle, - self.cursor._transaction._tr_handle, - blob_handle, blobid, 0, None) + api.isc_open_blob2(self._isc_status, self.cursor._connection._db_handle, + self.cursor._transaction._tr_handle, + blob_handle, blobid, 0, None) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, @@ -2680,9 +2735,8 @@ result = ctypes.cast(ctypes.create_string_buffer(20), buf_pointer) api.isc_blob_info(self._isc_status, blob_handle, 2, - bs([isc_info_blob_total_length, - isc_info_blob_max_segment]), - 20, result) + bs([isc_info_blob_total_length, isc_info_blob_max_segment]), + 20, result) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, @@ -2701,54 +2755,55 @@ segment_size = bytes_to_uint(result[ offset + 2:offset + 2 + length]) offset += length + 2 - # Load BLOB - allow_incomplete_segment_read = False - status = ISC_STATUS(0) - blob = ctypes.create_string_buffer(blob_length) - 
bytes_read = 0 - bytes_actually_read = ctypes.c_ushort(0) - while bytes_read < blob_length: - status = api.isc_get_segment(self._isc_status, - blob_handle, - bytes_actually_read, - min(segment_size, - blob_length - bytes_read), - ctypes.byref( - blob, bytes_read)) - if status != 0: - if ((status == isc_segment) - and allow_incomplete_segment_read): - bytes_read += bytes_actually_read.value + # Does the blob size exceeds treshold for streamed one? + if ((self.__streamed_blob_treshold >= 0) and + (blob_length > self.__streamed_blob_treshold)): + # Stream BLOB + value = BlobReader(blobid, self.cursor._connection._db_handle, + self.cursor._transaction._tr_handle, + sqlvar.sqlsubtype == 1, + self.__charset) + self.__blob_readers.append(value) + else: + # Load BLOB + allow_incomplete_segment_read = True + status = ISC_STATUS(0) + blob = ctypes.create_string_buffer(blob_length) + bytes_read = 0 + bytes_actually_read = ctypes.c_ushort(0) + while bytes_read < blob_length: + status = api.isc_get_segment(self._isc_status, blob_handle, + bytes_actually_read, + min(segment_size, blob_length - bytes_read), + ctypes.byref(blob, bytes_read)) + if status != 0: + if (status == isc_segment) and allow_incomplete_segment_read: + bytes_read += bytes_actually_read.value + else: + raise exception_from_status(DatabaseError, self._isc_status, + "Cursor.read_output_blob/isc_get_segment:") else: - raise exception_from_status(DatabaseError, - self._isc_status, - "Cursor.read_output_blob/isc_get_segment:") - else: - bytes_read += bytes_actually_read.value - # Finish + bytes_read += bytes_actually_read.value + # Finalize value + value = blob.raw + if (self.__charset or PYTHON_MAJOR_VER == 3) and sqlvar.sqlsubtype == 1: + value = b2u(value, self.__python_charset) + # Close blob api.isc_close_blob(self._isc_status, blob_handle) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, "Cursor.read_otput_blob/isc_close_blob:") - value = blob.raw - if ((self.__charset or PYTHON_MAJOR_VER == 3) - and sqlvar.sqlsubtype == 1): - value = b2u(value,self.__python_charset) elif vartype == SQL_ARRAY: value = [] val = sqlvar.sqldata[:sqlvar.sqllen] - arrayid = ISC_QUAD(bytes_to_uint(val[:4]), - bytes_to_uint(val[4:sqlvar.sqllen])) + arrayid = ISC_QUAD(bytes_to_uint(val[:4]), bytes_to_uint(val[4:sqlvar.sqllen])) arraydesc = ISC_ARRAY_DESC(0) sqlsubtype = self.cursor._connection._get_array_sqlsubtype(sqlvar.relname, sqlvar.sqlname) - api.isc_array_lookup_bounds(self._isc_status, - self.cursor._connection._db_handle, - self.cursor._transaction._tr_handle, - sqlvar.relname, - sqlvar.sqlname, - arraydesc) + api.isc_array_lookup_bounds(self._isc_status, self.cursor._connection._db_handle, + self.cursor._transaction._tr_handle, + sqlvar.relname, sqlvar.sqlname, arraydesc) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, @@ -2756,7 +2811,7 @@ value_type = arraydesc.array_desc_dtype value_scale = arraydesc.array_desc_scale value_size = arraydesc.array_desc_length - if value_type in (blr_varying,blr_varying2): + if value_type in (blr_varying, blr_varying2): value_size += 2 dimensions = [] total_num_elements = 1 @@ -2769,38 +2824,36 @@ value_buffer = ctypes.cast(buf, buf_pointer) tsize = ISC_LONG(total_size) - api.isc_array_get_slice(self._isc_status, - self.cursor._connection._db_handle, - self.cursor._transaction._tr_handle, - arrayid, arraydesc, - value_buffer, tsize) + api.isc_array_get_slice(self._isc_status, self.cursor._connection._db_handle, + 
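New in 2.0: the threshold branch above hands back a :class:`BlobReader` once a materialized blob would exceed the limit. A sketch; the NOTES table and MEMO column are assumptions (note the upstream spelling 'treshold'):

    ps = cur.prep("select MEMO from NOTES")
    ps.set_stream_blob_treshold(1024)
    cur.execute(ps)
    memo = cur.fetchone()[0]
    if hasattr(memo, 'read'):   # BlobReader for blobs larger than 1 KiB
        data = memo.read()
        memo.close()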
self.cursor._transaction._tr_handle, arrayid, arraydesc, + value_buffer, tsize) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, "Cursor.read_otput_array/isc_array_get_slice:") - (value,bufpos) = self.__extract_db_array_to_list(value_size, - value_type, - sqlsubtype, - value_scale, - 0, dimensions, - value_buffer,0) + (value, bufpos) = self.__extract_db_array_to_list(value_size, + value_type, + sqlsubtype, + value_scale, + 0, dimensions, + value_buffer, 0) values.append(value) return tuple(values) - def __extract_db_array_to_list(self,esize,dtype,subtype,scale,dim,dimensions, - buf,bufpos): + def __extract_db_array_to_list(self, esize, dtype, subtype, scale, dim, dimensions, + buf, bufpos): """Extracts ARRRAY column data from buffer to Python list(s). """ value = [] if dim == len(dimensions)-1: for i in xrange(dimensions[dim]): - if dtype in (blr_text,blr_text2): - val = ctypes.string_at(buf[bufpos:bufpos+esize],esize) + if dtype in (blr_text, blr_text2): + val = ctypes.string_at(buf[bufpos:bufpos+esize], esize) ### Todo: verify handling of P version differences if ((self.__charset or PYTHON_MAJOR_VER == 3) - and subtype != 1): # non OCTETS - val = b2u(val,self.__python_charset) + and subtype != 1): # non OCTETS + val = b2u(val, self.__python_charset) # CHAR with multibyte encoding requires special handling if subtype in (4, 69): # UTF8 and GB18030 reallength = esize // 4 @@ -2809,18 +2862,20 @@ else: reallength = esize val = val[:reallength] - elif dtype in (blr_varying,blr_varying2): + elif dtype in (blr_varying, blr_varying2): val = ctypes.string_at(buf[bufpos:bufpos+esize]) - if ((self.__charset or PYTHON_MAJOR_VER == 3) - and subtype != 1): # non OCTETS - val = b2u(val,self.__python_charset) - elif dtype in (blr_short,blr_long,blr_int64): + if ((self.__charset or PYTHON_MAJOR_VER == 3) and + subtype != 1): # non OCTETS + val = b2u(val, self.__python_charset) + elif dtype in (blr_short, blr_long, blr_int64): val = bytes_to_int(buf[bufpos:bufpos+esize]) - if (subtype or scale): + if subtype or scale: val = decimal.Decimal(val) / _tenTo[abs(256-scale)] + elif dtype == blr_bool: + val = bytes_to_int(buf[bufpos:bufpos+esize]) == 1 elif dtype == blr_float: val = struct.unpack('f', buf[bufpos:bufpos+esize])[0] - elif dtype in (blr_d_float,blr_double): + elif dtype in (blr_d_float, blr_double): val = struct.unpack('d', buf[bufpos:bufpos+esize])[0] elif dtype == blr_timestamp: yyyy, mm, dd = self._parse_date(buf[bufpos:bufpos+4]) @@ -2838,20 +2893,22 @@ bufpos += esize else: for i in xrange(dimensions[dim]): - (val,bufpos) = self.__extract_db_array_to_list(esize,dtype,subtype,scale,dim+1,dimensions,buf,bufpos) + (val, bufpos) = self.__extract_db_array_to_list(esize, dtype, subtype, + scale, dim+1, dimensions, + buf, bufpos) value.append(val) - return (value,bufpos) + return (value, bufpos) - def __copy_list_to_db_array(self,esize,dtype,subtype,scale,dim,dimensions, - value,buf,bufpos): + def __copy_list_to_db_array(self, esize, dtype, subtype, scale, dim, dimensions, + value, buf, bufpos): """Copies Python list(s) to ARRRAY column data buffer. 
""" valuebuf = None - if dtype in (blr_text,blr_text2): - valuebuf = ctypes.create_string_buffer(bs([0]),esize) - elif dtype in (blr_varying,blr_varying2): - valuebuf = ctypes.create_string_buffer(bs([0]),esize) - elif dtype in (blr_short,blr_long,blr_int64): + if dtype in (blr_text, blr_text2): + valuebuf = ctypes.create_string_buffer(bs([0]), esize) + elif dtype in (blr_varying, blr_varying2): + valuebuf = ctypes.create_string_buffer(bs([0]), esize) + elif dtype in (blr_short, blr_long, blr_int64): if esize == 2: valuebuf = ISC_SHORT(0) elif esize == 4: @@ -2861,28 +2918,33 @@ else: raise OperationalError("Unsupported number type") elif dtype == blr_float: - valuebuf = ctypes.create_string_buffer(bs([0]),esize) - elif dtype in (blr_d_float,blr_double): - valuebuf = ctypes.create_string_buffer(bs([0]),esize) + valuebuf = ctypes.create_string_buffer(bs([0]), esize) + elif dtype in (blr_d_float, blr_double): + valuebuf = ctypes.create_string_buffer(bs([0]), esize) elif dtype == blr_timestamp: - valuebuf = ctypes.create_string_buffer(bs([0]),esize) + valuebuf = ctypes.create_string_buffer(bs([0]), esize) elif dtype == blr_sql_date: - valuebuf = ctypes.create_string_buffer(bs([0]),esize) + valuebuf = ctypes.create_string_buffer(bs([0]), esize) elif dtype == blr_sql_time: - valuebuf = ctypes.create_string_buffer(bs([0]),esize) + valuebuf = ctypes.create_string_buffer(bs([0]), esize) + elif dtype == blr_bool: + valuebuf = ctypes.create_string_buffer(bs([0]), esize) + #sqlvar.sqldata = ctypes.cast(ctypes.pointer( + #ctypes.create_string_buffer( + #int_to_bytes(value, sqlvar.sqllen))), buf_pointer) else: raise OperationalError("Unsupported Firebird ARRAY subtype: %i" % dtype) - self.__fill_db_array_buffer(esize,dtype, - subtype,scale, - dim,dimensions, - value,valuebuf, - buf,bufpos) - def __fill_db_array_buffer(self,esize,dtype,subtype,scale,dim,dimensions, - value,valuebuf,buf,bufpos): + self.__fill_db_array_buffer(esize, dtype, + subtype, scale, + dim, dimensions, + value, valuebuf, + buf, bufpos) + def __fill_db_array_buffer(self, esize, dtype, subtype, scale, dim, dimensions, + value, valuebuf, buf, bufpos): if dim == len(dimensions)-1: for i in xrange(dimensions[dim]): - if dtype in (blr_text,blr_text2, - blr_varying,blr_varying2): + if dtype in (blr_text, blr_text2, + blr_varying, blr_varying2): val = value[i] if isinstance(val, UnicodeType): val = val.encode(self.__python_charset) @@ -2891,9 +2953,9 @@ " expected %i, found %i" % (esize, len(val))) valuebuf.value = val - ctypes.memmove(ctypes.byref(buf,bufpos),valuebuf,esize) - elif dtype in (blr_short,blr_long,blr_int64): - if (subtype or scale): + ctypes.memmove(ctypes.byref(buf, bufpos), valuebuf, esize) + elif dtype in (blr_short, blr_long, blr_int64): + if subtype or scale: val = value[i] if isinstance(val, decimal.Decimal): val = int((val * _tenTo[256-abs(scale)]).to_integral()) @@ -2913,75 +2975,82 @@ valuebuf.value = value[i] else: raise OperationalError("Unsupported type") - ctypes.memmove(ctypes.byref(buf,bufpos), + ctypes.memmove(ctypes.byref(buf, bufpos), + ctypes.byref(valuebuf), + esize) + elif dtype == blr_bool: + valuebuf.value = int_to_bytes(1 if value[i] else 0, 1) + ctypes.memmove(ctypes.byref(buf, bufpos), ctypes.byref(valuebuf), esize) elif dtype == blr_float: valuebuf.value = struct.pack('f', value[i]) - ctypes.memmove(ctypes.byref(buf,bufpos),valuebuf,esize) - elif dtype in (blr_d_float,blr_double): + ctypes.memmove(ctypes.byref(buf, bufpos), valuebuf, esize) + elif dtype in (blr_d_float, blr_double): valuebuf.value 
= struct.pack('d', value[i]) - ctypes.memmove(ctypes.byref(buf,bufpos),valuebuf,esize) + ctypes.memmove(ctypes.byref(buf, bufpos), valuebuf, esize) elif dtype == blr_timestamp: valuebuf.value = self._convert_timestamp(value[i]) - ctypes.memmove(ctypes.byref(buf,bufpos),valuebuf,esize) + ctypes.memmove(ctypes.byref(buf, bufpos), valuebuf, esize) elif dtype == blr_sql_date: valuebuf.value = self._convert_date(value[i]) - ctypes.memmove(ctypes.byref(buf,bufpos),valuebuf,esize) + ctypes.memmove(ctypes.byref(buf, bufpos), valuebuf, esize) elif dtype == blr_sql_time: valuebuf.value = self._convert_time(value[i]) - ctypes.memmove(ctypes.byref(buf,bufpos),valuebuf,esize) + ctypes.memmove(ctypes.byref(buf, bufpos), valuebuf, esize) else: raise OperationalError("Unsupported Firebird ARRAY subtype: %i" % dtype) bufpos += esize else: for i in xrange(dimensions[dim]): - bufpos = self.__fill_db_array_buffer(esize,dtype,subtype, - scale,dim+1, - dimensions,value[i], - valuebuf,buf,bufpos) + bufpos = self.__fill_db_array_buffer(esize, dtype, subtype, + scale, dim+1, + dimensions, value[i], + valuebuf, buf, bufpos) return bufpos - def __validate_array_value(self,dim,dimensions,value_type,sqlsubtype, - value_scale,value): + def __validate_array_value(self, dim, dimensions, value_type, sqlsubtype, + value_scale, value): """Validates whether Python list(s) passed as ARRAY column value matches column definition (length, structure and value types). """ - ok = isinstance(value,(ibase.ListType,ibase.TupleType)) + ok = isinstance(value, (ibase.ListType, ibase.TupleType)) ok = ok and (len(value) == dimensions[dim]) if not ok: return False for i in xrange(dimensions[dim]): if dim == len(dimensions)-1: # leaf: check value type - if value_type in (blr_text,blr_text2, - blr_varying,blr_varying2): - ok = isinstance(value[i],(ibase.StringType,ibase.UnicodeType)) - elif value_type in (blr_short,blr_long,blr_int64): - if (sqlsubtype or value_scale): - ok = isinstance(value[i],decimal.Decimal) + if value_type in (blr_text, blr_text2, + blr_varying, blr_varying2): + ok = isinstance(value[i], (ibase.StringType, ibase.UnicodeType)) + elif value_type in (blr_short, blr_long, blr_int64): + if sqlsubtype or value_scale: + ok = isinstance(value[i], decimal.Decimal) else: - ok = isinstance(value[i],ibase.IntType) + ok = isinstance(value[i], ibase.IntType) elif value_type == blr_float: - ok = isinstance(value[i],ibase.FloatType) - elif value_type in (blr_d_float,blr_double): - ok = isinstance(value[i],ibase.FloatType) + ok = isinstance(value[i], ibase.FloatType) + elif value_type in (blr_d_float, blr_double): + ok = isinstance(value[i], ibase.FloatType) elif value_type == blr_timestamp: - ok = isinstance(value[i],datetime.datetime) + ok = isinstance(value[i], datetime.datetime) elif value_type == blr_sql_date: - ok = isinstance(value[i],datetime.date) + ok = isinstance(value[i], datetime.date) elif value_type == blr_sql_time: - ok = isinstance(value[i],datetime.time) + ok = isinstance(value[i], datetime.time) + elif value_type == blr_bool: + ok = isinstance(value[i], bool) else: ok = False else: # non-leaf: recurse down - ok = ok and self.__validate_array_value(dim+1,dimensions,value_type, - sqlsubtype,value_scale, + ok = ok and self.__validate_array_value(dim+1, dimensions, value_type, + sqlsubtype, value_scale, value[i]) if not ok: return False return ok - def __Tuple2XSQLDA(self, xsqlda, parameters): + def __tuple2xsqlda(self, xsqlda, parameters): """Move data from parameters to input XSQLDA. 
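ARRAY columns surface as nested Python lists on read and are validated against the declared dimensions on write, per the code above. A sketch of the read side, with SAMPLE assumed to have a MATRIX INTEGER[2, 2] column:

    cur.execute("select MATRIX from SAMPLE where ID = ?", (1,))
    print(cur.fetchone()[0])   # e.g. [[1, 2], [3, 4]]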
""" for i in xrange(xsqlda.sqld): @@ -2990,7 +3059,7 @@ vartype = sqlvar.sqltype & ~1 scale = sqlvar.sqlscale # NULL handling - if value == None: + if value is None: # Set the null flag whether sqlvar definition allows it or not, # to give BEFORE triggers to act on value without # our interference. @@ -3003,15 +3072,14 @@ # if sqlvar allows null, allocate the null flag # I don't know whether it's necessary, # but we'll do it anyway for safety - if ((sqlvar.sqltype & 1) != 0): + if (sqlvar.sqltype & 1) != 0: sqlvar.sqlind = ctypes.pointer(ISC_SHORT(0)) # Fill in value by type - if ((vartype != SQL_BLOB and - isinstance(value, (StringType, UnicodeType))) - or vartype in [SQL_TEXT, SQL_VARYING]): + if ((vartype != SQL_BLOB and isinstance(value, (StringType, UnicodeType))) + or vartype in [SQL_TEXT, SQL_VARYING]): # Place for Implicit Conversion of Input Parameters # to Strings - if not isinstance(value, (UnicodeType,StringType,ibase.mybytes)): + if not isinstance(value, (UnicodeType, StringType, ibase.mybytes)): value = str(value) # Place for Implicit Conversion of Input Parameters # from Strings @@ -3028,7 +3096,7 @@ elif vartype in [SQL_SHORT, SQL_LONG, SQL_INT64]: # It's scalled integer? - if (sqlvar.sqlsubtype or scale): + if sqlvar.sqlsubtype or scale: if isinstance(value, decimal.Decimal): value = int( (value * _tenTo[abs(scale)]).to_integral()) @@ -3068,19 +3136,19 @@ elif vartype == SQL_BOOLEAN: sqlvar.sqldata = ctypes.cast(ctypes.pointer( ctypes.create_string_buffer( - int_to_bytes(value, sqlvar.sqllen))), buf_pointer) + int_to_bytes(1 if value else 0, sqlvar.sqllen))), buf_pointer) elif vartype == SQL_BLOB: blobid = ISC_QUAD(0, 0) blob_handle = isc_blob_handle() - if hasattr(value,'read'): + if hasattr(value, 'read'): # It seems we've got file-like object, use stream BLOB api.isc_create_blob2(self._isc_status, - self.cursor._connection._db_handle, - self.cursor._transaction._tr_handle, - blob_handle, blobid, 4, - bs([ibase.isc_bpb_version1, - ibase.isc_bpb_type,1, - ibase.isc_bpb_type_stream])) + self.cursor._connection._db_handle, + self.cursor._transaction._tr_handle, + blob_handle, blobid, 4, + bs([ibase.isc_bpb_version1, + ibase.isc_bpb_type, 1, + ibase.isc_bpb_type_stream])) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, @@ -3092,14 +3160,11 @@ blob.raw = ibase.b(value_chunk) while len(value_chunk) > 0: api.isc_put_segment(self._isc_status, blob_handle, - len(value_chunk), - ctypes.byref(blob) - ) + len(value_chunk), ctypes.byref(blob)) if db_api_error(self._isc_status): - raise exception_from_status(DatabaseError, - self._isc_status, + raise exception_from_status(DatabaseError, self._isc_status, "Cursor.write_input_blob/isc_put_segment:") - ctypes.memset(blob,0,MAX_BLOB_SEGMENT_SIZE) + ctypes.memset(blob, 0, MAX_BLOB_SEGMENT_SIZE) value_chunk = value.read(MAX_BLOB_SEGMENT_SIZE) blob.raw = ibase.b(value_chunk) api.isc_close_blob(self._isc_status, blob_handle) @@ -3117,10 +3182,9 @@ ' acceptable input for' ' a non-textual BLOB column.') blob = ctypes.create_string_buffer(value) - api.isc_create_blob2(self._isc_status, - self.cursor._connection._db_handle, - self.cursor._transaction._tr_handle, - blob_handle, blobid, 0, None) + api.isc_create_blob2(self._isc_status, self.cursor._connection._db_handle, + self.cursor._transaction._tr_handle, + blob_handle, blobid, 0, None) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, @@ -3130,22 +3194,15 @@ total_size = len(value) 
bytes_written_so_far = 0 bytes_to_write_this_time = MAX_BLOB_SEGMENT_SIZE - while (bytes_written_so_far < total_size): - if ( - (total_size - bytes_written_so_far) < - MAX_BLOB_SEGMENT_SIZE - ): + while bytes_written_so_far < total_size: + if (total_size - bytes_written_so_far) < MAX_BLOB_SEGMENT_SIZE: bytes_to_write_this_time = (total_size - bytes_written_so_far) api.isc_put_segment(self._isc_status, blob_handle, - bytes_to_write_this_time, - ctypes.byref(blob, - bytes_written_so_far - ) - ) + bytes_to_write_this_time, + ctypes.byref(blob, bytes_written_so_far)) if db_api_error(self._isc_status): - raise exception_from_status(DatabaseError, - self._isc_status, + raise exception_from_status(DatabaseError, self._isc_status, "Cursor.write_input_blob/isc_put_segment:") bytes_written_so_far += bytes_to_write_this_time api.isc_close_blob(self._isc_status, blob_handle) @@ -3154,7 +3211,7 @@ self._isc_status, "Cursor.write_input_blob/isc_close_blob:") elif vartype == SQL_ARRAY: - arrayid = ISC_QUAD(0,0) + arrayid = ISC_QUAD(0, 0) arrayid_ptr = ctypes.pointer(arrayid) arraydesc = ISC_ARRAY_DESC(0) sqlvar.sqldata = ctypes.cast(ctypes.pointer(arrayid), @@ -3162,11 +3219,9 @@ sqlsubtype = self.cursor._connection._get_array_sqlsubtype(sqlvar.relname, sqlvar.sqlname) api.isc_array_lookup_bounds(self._isc_status, - self.cursor._connection._db_handle, - self.cursor._transaction._tr_handle, - sqlvar.relname, - sqlvar.sqlname, - arraydesc) + self.cursor._connection._db_handle, + self.cursor._transaction._tr_handle, + sqlvar.relname, sqlvar.sqlname, arraydesc) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, @@ -3174,7 +3229,7 @@ value_type = arraydesc.array_desc_dtype value_scale = arraydesc.array_desc_scale value_size = arraydesc.array_desc_length - if value_type in (blr_varying,blr_varying2): + if value_type in (blr_varying, blr_varying2): value_size += 2 dimensions = [] total_num_elements = 1 @@ -3184,27 +3239,24 @@ total_num_elements *= dimensions[dimension] total_size = total_num_elements * value_size # Validate value to make sure it matches the array structure - if not self.__validate_array_value(0,dimensions,value_type, + if not self.__validate_array_value(0, dimensions, value_type, sqlsubtype, - value_scale,value): + value_scale, value): raise ValueError("Incorrect ARRAY field value.") value_buffer = ctypes.create_string_buffer(total_size) tsize = ISC_LONG(total_size) - self.__copy_list_to_db_array(value_size,value_type, - sqlsubtype,value_scale, + self.__copy_list_to_db_array(value_size, value_type, + sqlsubtype, value_scale, 0, dimensions, - value,value_buffer,0) - api.isc_array_put_slice(self._isc_status, - self.cursor._connection._db_handle, - self.cursor._transaction._tr_handle, - arrayid_ptr, arraydesc, - value_buffer, - tsize) + value, value_buffer, 0) + api.isc_array_put_slice(self._isc_status, self.cursor._connection._db_handle, + self.cursor._transaction._tr_handle, arrayid_ptr, arraydesc, + value_buffer, tsize) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, "Cursor.read_otput_array/isc_array_put_slice:") - sqlvar.sqldata = ctypes.cast(arrayid_ptr,buf_pointer) + sqlvar.sqldata = ctypes.cast(arrayid_ptr, buf_pointer) def _free_handle(self): if self._stmt_handle != None and not self.__closed: self.__executed = False @@ -3214,12 +3266,10 @@ while len(self.__blob_readers) > 0: self.__blob_readers.pop().close() if self.statement_type == isc_info_sql_stmt_select: - 
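Input conversion per the branches above: None maps to SQL NULL, Decimal is scaled for NUMERIC/DECIMAL columns, a file-like value is written as a stream BLOB, and nested lists feed the validate/copy/put_slice ARRAY path. A sketch with assumed tables and file name:

    from decimal import Decimal
    cur.execute("insert into PRODUCTS (ID, NAME, PRICE) values (?, ?, ?)",
                (2, None, Decimal('9.95')))      # None -> NULL, Decimal scaled
    with open('photo.jpg', 'rb') as f:
        cur.execute("insert into IMAGES (ID, DATA) values (?, ?)", (1, f))  # stream BLOB
    cur.execute("insert into SAMPLE (ID, MATRIX) values (?, ?)",
                (1, [[1, 2], [3, 4]]))           # ARRAY from nested lists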
api.isc_dsql_free_statement(self._isc_status, - self._stmt_handle, - ibase.DSQL_close) + api.isc_dsql_free_statement(self._isc_status, self._stmt_handle, ibase.DSQL_close) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, - "Error while releasing SQL statement handle:") + "Error while releasing SQL statement handle:") def _close(self): if self._stmt_handle != None: while len(self.__blob_readers) > 0: @@ -3237,8 +3287,8 @@ connection = self.cursor._connection if self.cursor else None if (not connection) or (connection and not connection.closed): api.isc_dsql_free_statement(self._isc_status, stmt_handle, ibase.DSQL_drop) - if (db_api_error(self._isc_status) - and (self._isc_status[1] not in [335544528,335544485])): + if (db_api_error(self._isc_status) and + (self._isc_status[1] not in [335544528, 335544485])): raise exception_from_status(DatabaseError, self._isc_status, "Error while closing SQL statement:") def _execute(self, parameters=None): @@ -3255,22 +3305,18 @@ for sqlvar in self._in_sqlda.sqlvar[:self.n_input_params]: sqlvar.sqltype, sqlvar.sqllen = self._in_sqlda_save[i] i += 1 - self.__Tuple2XSQLDA(self._in_sqlda, parameters) + self.__tuple2xsqlda(self._in_sqlda, parameters) xsqlda_in = ctypes.cast(ctypes.pointer(self._in_sqlda), XSQLDA_PTR) else: xsqlda_in = None # Execute the statement - if ((self.statement_type == isc_info_sql_stmt_exec_procedure) - and (self._out_sqlda.sqld > 0)): + if ((self.statement_type == isc_info_sql_stmt_exec_procedure) and + (self._out_sqlda.sqld > 0)): # NOTE: We have to pass xsqlda_out only for statements that return # single row xsqlda_out = ctypes.cast(ctypes.pointer(self._out_sqlda), XSQLDA_PTR) - api.isc_dsql_execute2(self._isc_status, - self.cursor._transaction._tr_handle, - self._stmt_handle, - self.__sql_dialect, - xsqlda_in, - xsqlda_out) + api.isc_dsql_execute2(self._isc_status, self.cursor._transaction._tr_handle, + self._stmt_handle, self.__sql_dialect, xsqlda_in, xsqlda_out) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, "Error while executing Stored Procedure:") @@ -3278,14 +3324,10 @@ # via fetch*() calls as Python DB API requires. However, it's not # possible to call fetch on open such statement, so we'll cache # the result and return it in fetchone instead calling fetch. 
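Stored procedures run through the exec_procedure branch of isc_dsql_execute2 above, with the single output row cached for fetchone(); e.g. via callproc (the employee database's ADD_EMP_PROJ procedure and its arguments are an assumed example):

    cur.callproc('ADD_EMP_PROJ', (2, 'VBASE'))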
- self.__output_cache = self.__XSQLDA2Tuple(self._out_sqlda) + self.__output_cache = self.__xsqlda2tuple(self._out_sqlda) else: - api.isc_dsql_execute2(self._isc_status, - self.cursor._transaction._tr_handle, - self._stmt_handle, - self.__sql_dialect, - xsqlda_in, - None) + api.isc_dsql_execute2(self._isc_status, self.cursor._transaction._tr_handle, + self._stmt_handle, self.__sql_dialect, xsqlda_in, None) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, "Error while executing SQL statement:") @@ -3294,8 +3336,7 @@ self.__closed = False self._last_fetch_status = ISC_STATUS(self.NO_FETCH_ATTEMPTED_YET) def _fetchone(self): - if (self._last_fetch_status == self.RESULT_SET_EXHAUSTED - and not self.__output_cache): + if self._last_fetch_status == self.RESULT_SET_EXHAUSTED and not self.__output_cache: return None if self.__executed: if self.__output_cache: @@ -3307,54 +3348,61 @@ return self.__output_cache else: if self.n_output_params == 0: - raise DatabaseError("Attempt to fetch row of results after statement that does not produce result set.") + raise DatabaseError("Attempt to fetch row of results after a statement" + " that does not produce a result set.") self._last_fetch_status = api.isc_dsql_fetch( self._isc_status, self._stmt_handle, self.__sql_dialect, ctypes.cast(ctypes.pointer(self._out_sqlda), XSQLDA_PTR)) if self._last_fetch_status == 0: - return self.__XSQLDA2Tuple(self._out_sqlda) + return self.__xsqlda2tuple(self._out_sqlda) elif self._last_fetch_status == self.RESULT_SET_EXHAUSTED: self._free_handle() return None else: if db_api_error(self._isc_status): - raise exception_from_status(DatabaseError, - self._isc_status, - "Cursor.fetchone:") + raise exception_from_status(DatabaseError, self._isc_status, "Cursor.fetchone:") elif self.__closed: raise ProgrammingError("Cannot fetch from closed cursor.") else: raise ProgrammingError("Cannot fetch from this cursor because" " it has not executed a statement.") def _set_cursor_name(self, name): - api.isc_dsql_set_cursor_name(self._isc_status, - self._stmt_handle, b(name), 0) + api.isc_dsql_set_cursor_name(self._isc_status, self._stmt_handle, b(name), 0) if db_api_error(self._isc_status): raise exception_from_status(OperationalError, self._isc_status, "Could not set cursor name:") self._name = name - def set_stream_blob(self,blob_name): + def set_stream_blob(self, blob_spec): """Specify a BLOB column(s) to work in `stream` mode instead classic, materialized mode. - :param blob_name: Single name or sequence of column names. Name must + :param blob_spec: Single name or sequence of column names. Name must be in format as it's stored in database (refer to :attr:`description` for real value). - :type blob_name: string or sequence + :type blob_spec: string or list .. important:: BLOB name is **permanently** added to the list of BLOBs handled as `stream` BLOBs by this instance. - :param string blob_name: Name of BLOB column. + :param string blob_spec: Name of BLOB column. """ - if isinstance(blob_name,ibase.StringType): - self.__streamed_blobs.append(blob_name) + if isinstance(blob_spec, ibase.StringType): + self.__streamed_blobs.append(blob_spec) else: - self.__streamed_blobs.extend(blob_name) + self.__streamed_blobs.extend(blob_spec) + def set_stream_blob_treshold(self, size): + """Specify the maximum blob size for materialized blobs. + If the size of a particular blob exceeds this threshold, a streamed blob + (:class:`BlobReader`) is returned instead of a string. Value -1 means no size limit (use + at your own risk). 
Default value is 64K + + :param integer size: Max. size for materialized blob. + """ + self.__streamed_blob_treshold = size def __del__(self): if self._stmt_handle != None: self._close() @@ -3446,7 +3494,7 @@ def __iter__(self): return self def __valid_ps(self): - return (self._ps is not None) and not (isinstance(self._ps,weakref.ProxyType) + return (self._ps is not None) and not (isinstance(self._ps, weakref.ProxyType) and not dir(self._ps)) def __get_description(self): if self.__valid_ps(): @@ -3464,7 +3512,7 @@ else: return None def __set_name(self, name): - if name == None or not isinstance(name, StringType): + if name is None or not isinstance(name, StringType): raise ProgrammingError("The name attribute can only be set to a" " string, and cannot be deleted") if not self.__valid_ps(): @@ -3484,12 +3532,13 @@ return self._connection def __get_transaction(self): return self._transaction - def __connection_deleted(self,obj): + def __connection_deleted(self, obj): self._connection = None - def __ps_deleted(self,obj): + def __ps_deleted(self, obj): self._ps = None def _set_as_internal(self): - self._connection = weakref.proxy(self._connection, _weakref_callback(self.__connection_deleted)) + self._connection = weakref.proxy(self._connection, + _weakref_callback(self.__connection_deleted)) def callproc(self, procname, parameters=None): """Call a stored database procedure with the given name. @@ -3501,8 +3550,8 @@ :type parameters: List or Tuple :returns: parameters, as required by Python DB API 2.0 Spec. :raises TypeError: When parameters is not List or Tuple. - :raises ProgrammingError: When more parameters than expected are suplied. - :raises DatabaseError: When error is returned by server. + :raises `~fdb.ProgrammingError`: When more parameters than expected are suplied. + :raises `~fdb.DatabaseError`: When error is returned by server. """ if not parameters: params = [] @@ -3520,8 +3569,7 @@ Closes any currently open :class:`PreparedStatement`. However, the cursor is still bound to :class:`Connection` and :class:`Transaction`, so it - could be still used to execute SQL statements. Also the cache with - prepared statements is left intact. + could be still used to execute SQL statements. .. warning:: @@ -3558,13 +3606,16 @@ :raises ValueError: When operation PreparedStatement belongs to different Cursor instance. :raises TypeError: When parameters is not List or Tuple. - :raises ProgrammingError: When more parameters than expected are suplied. - :raises DatabaseError: When error is returned by server. + :raises `~fdb.ProgrammingError`: When more parameters than expected are suplied. + :raises `~fdb.DatabaseError`: When error is returned by server. """ if is_dead_proxy(self._ps): self._ps = None if self._ps != None: - self._ps.close() + # Dirty trick to check whether operation when it's + # PreparedStatement is the one we (may) have weak proxy for + if self._ps.__repr__.__self__ is not operation: + self._ps.close() if not self._transaction.active: self._transaction.begin() if isinstance(operation, PreparedStatement): @@ -3587,8 +3638,8 @@ :param string operation: SQL command :returns: :class:`PreparedStatement` instance. - :raises DatabaseError: When error is returned by server. - :raises InternalError: On unexpected processing condition. + :raises `~fdb.DatabaseError`: When error is returned by server. + :raises `~fdb.InternalError`: On unexpected processing condition. 
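The weak-proxy check above lets the same PreparedStatement be re-executed without being closed and re-prepared, and the fetch variants consume its result set; a sketch:

    ps = cur.prep("select COUNTRY, CURRENCY from COUNTRY")
    cur.execute(ps)
    first = cur.fetchone()     # single tuple, or None when exhausted
    rest = cur.fetchall()      # whatever remains (fetchmany(n) caps the batch size)
    cur.execute(ps)            # same statement reused: not closed, not prepared again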
""" if not self._transaction.active: self._transaction.begin() @@ -3617,11 +3668,11 @@ :raises ValueError: When operation PreparedStatement belongs to different Cursor instance. :raises TypeError: When seq_of_parameters is not List or Tuple. - :raises ProgrammingError: When there are more parameters in any sequence + :raises `~fdb.ProgrammingError`: When there are more parameters in any sequence than expected. - :raises DatabaseError: When error is returned by server. + :raises `~fdb.DatabaseError`: When error is returned by server. """ - if not isinstance(operation,PreparedStatement): + if not isinstance(operation, PreparedStatement): operation = self.prep(operation) for parameters in seq_of_parameters: self.execute(operation, parameters) @@ -3630,10 +3681,10 @@ """Fetch the next row of a query result set. :returns: tuple of returned values, or None when no more data is available. - :raises DatabaseError: When error is returned by server. - :raises ProgrammingError: When underlying :class:`PreparedStatement` is - closed, statement was not yet executed, or - unknown status is returned by fetch operation. + :raises `~fdb.DatabaseError`: When error is returned by server. + :raises `~fdb.ProgrammingError`: When underlying :class:`PreparedStatement` is + closed, statement was not yet executed, or + unknown status is returned by fetch operation. """ if self._ps: return self._ps._fetchone() @@ -3652,10 +3703,10 @@ :param integer size: Max. number of rows to fetch. :returns: List of tuples, where each tuple is one row of returned values. - :raises DatabaseError: When error is returned by server. - :raises ProgrammingError: When underlying :class:`PreparedStatement` is - closed, statement was not yet executed, or - unknown status is returned by fetch operation. + :raises `~fdb.DatabaseError`: When error is returned by server. + :raises `~fdb.ProgrammingError`: When underlying :class:`PreparedStatement` is + closed, statement was not yet executed, or + unknown status is returned by fetch operation. """ i = 0 result = [] @@ -3671,10 +3722,10 @@ """Fetch all (remaining) rows of a query result. :returns: List of tuples, where each tuple is one row of returned values. - :raises DatabaseError: When error is returned by server. - :raises ProgrammingError: When underlying :class:`PreparedStatement` is - closed, statement was not yet executed, or - unknown status is returned by fetch operation. + :raises `~fdb.DatabaseError`: When error is returned by server. + :raises `~fdb.ProgrammingError`: When underlying :class:`PreparedStatement` is + closed, statement was not yet executed, or + unknown status is returned by fetch operation. """ return [row for row in self] def fetchonemap(self): @@ -3684,10 +3735,10 @@ :returns: :class:`fbcore._RowMapping` of returned values, or None when no more data is available. - :raises DatabaseError: When error is returned by server. - :raises ProgrammingError: When underlying :class:`PreparedStatement` is - closed, statement was not yet executed, or - unknown status is returned by fetch operation. + :raises `~fdb.DatabaseError`: When error is returned by server. + :raises `~fdb.ProgrammingError`: When underlying :class:`PreparedStatement` is + closed, statement was not yet executed, or + unknown status is returned by fetch operation. """ row = self.fetchone() if row: @@ -3701,10 +3752,10 @@ :param integer size: Max. number of rows to fetch. :returns: List of :class:`fbcore._RowMapping` instances, one such instance for each row. 
- :raises DatabaseError: When error is returned by server. - :raises ProgrammingError: When underlying :class:`PreparedStatement` is - closed, statement was not yet executed, or - unknown status is returned by fetch operation. + :raises `~fdb.DatabaseError`: When error is returned by server. + :raises `~fdb.ProgrammingError`: When underlying :class:`PreparedStatement` is + closed, statement was not yet executed, or + unknown status is returned by fetch operation. """ i = 0 result = [] @@ -3723,10 +3774,10 @@ :returns: List of :class:`fbcore._RowMapping` instances, one such instance for each row. - :raises DatabaseError: When error is returned by server. - :raises ProgrammingError: When underlying :class:`PreparedStatement` is - closed, statement was not yet executed, or - unknown status is returned by fetch operation. + :raises `~fdb.DatabaseError`: When error is returned by server. + :raises `~fdb.ProgrammingError`: When underlying :class:`PreparedStatement` is + closed, statement was not yet executed, or + unknown status is returned by fetch operation. """ return [row for row in self.itermap()] def iter(self): @@ -3752,14 +3803,14 @@ """Required by Python DB API 2.0, but pointless for Firebird, so it does nothing.""" pass - def set_stream_blob(self,blob_name): + def set_stream_blob(self, blob_name): """Specify a BLOB column(s) to work in `stream` mode instead classic, materialized mode for already executed statement. :param blob_name: Single name or sequence of column names. Name must be in format as it's stored in database (refer to :attr:`description` for real value). - :type blob_name: string or sequence + :type blob_name: string or list .. important:: BLOB name is **permanently** added to the list of BLOBs handled as `stream` BLOBs by this instance. the same command executed repeatedly will retain this setting. :param string blob_name: Name of BLOB column. - :raises ProgrammingError: + :raises `~fdb.ProgrammingError`: """ if self._ps: self._ps.set_stream_blob(blob_name) else: raise ProgrammingError + def set_stream_blob_treshold(self, size): + """Specify the maximum blob size for materialized blobs. + If the size of a particular blob exceeds this threshold, a streamed blob + (:class:`BlobReader`) is returned instead of a string. Value -1 means no size limit (use + at your own risk). Default value is 64K. + + :param integer size: Max. size for materialized blob. + """ + if self._ps: + self._ps.set_stream_blob_treshold(size) + else: + raise ProgrammingError def __del__(self): self.close() #: (Read Only) Sequence of 7-item sequences. @@ -3848,7 +3911,7 @@ :param default_action: Action taken when active transaction is ended automatically (during :meth:`close` or :meth:`begin`). :type default_action: string 'commit' or 'rollback' - :raises ProgrammingError: When zero or more than 16 connections are given. + :raises `~fdb.ProgrammingError`: When zero or more than 16 connections are given. 
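Explicit transaction control per the Transaction class above, reusing `con` from the first sketch; note the upstream spelling of the READ COMMITTED constant:

    tr = con.trans(default_tpb=fdb.ISOLATION_LEVEL_READ_COMMITED)
    tcur = tr.cursor()
    tcur.execute("update COUNTRY set CURRENCY = CURRENCY where COUNTRY = ?", ('USA',))
    tr.commit()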
""" if len(connections) > 16: raise ProgrammingError("Transaction can't accept more than 16 Connections") @@ -3856,7 +3919,7 @@ raise ProgrammingError("Transaction requires at least one Connection") self._connections = [weakref.ref(c) for c in connections] self.__python_charset = connections[0]._python_charset - if default_tpb == None: + if default_tpb is None: self.default_tpb = ISOLATION_LEVEL_READ_COMMITED else: self.default_tpb = default_tpb @@ -3884,16 +3947,16 @@ c = cursor() if c: c.close() - def __con_in_list(self,connection): + def __con_in_list(self, connection): for con in self._connections: if con() == connection: return True return False def __get_default_action(self): return self.__default_action - def __set_default_action(self,action): + def __set_default_action(self, action): action = action.lower() - if not action in ('commit','rollback'): + if not action in ('commit', 'rollback'): raise ProgrammingError("Transaction's default action must be either" "'commit' or 'rollback'.") else: @@ -3929,17 +3992,15 @@ :param string sql: SQL statement to execute. - :raises DatabaseError: When error is returned from server. + :raises `~fdb.DatabaseError`: When error is returned from server. """ if not self.active: self.begin() for connection in self._connections: con = connection() sql = b(sql, con._python_charset) - api.isc_execute_immediate(self._isc_status, - con._db_handle, - self._tr_handle, - ctypes.c_short(len(sql)), sql) + api.isc_execute_immediate(self._isc_status, con._db_handle, self._tr_handle, + ctypes.c_short(len(sql)), sql) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, "Error while executing SQL statement:") @@ -3972,9 +4033,9 @@ a :meth:`commit` or :meth:`rollback` will be performed first, accordingly to :attr:`default_action` value. - :raises DatabaseError: When error is returned by server. - :raises ProgrammingError: When TPB is in usupported format, or transaction - is permanently :attr:`closed`. + :raises `~fdb.DatabaseError`: When error is returned by server. + :raises `~fdb.ProgrammingError`: When TPB is in usupported format, or transaction + is permanently :attr:`closed`. """ if self.__closed: raise ProgrammingError("Transaction is permanently closed.") @@ -3997,8 +4058,8 @@ _tpb = bs([isc_tpb_version3]) + _tpb if len(self._connections) == 1: api.isc_start_transaction(self._isc_status, self._tr_handle, 1, - self._connections[0]()._db_handle, - len(_tpb), _tpb) + self._connections[0]()._db_handle, + len(_tpb), _tpb) if db_api_error(self._isc_status): self._tr_handle = None raise exception_from_status(DatabaseError, self._isc_status, @@ -4010,10 +4071,7 @@ teb_array[i].db_ptr = ctypes.pointer(self._connections[i]()._db_handle) teb_array[i].tpb_len = len(_tpb) teb_array[i].tpb_ptr = _tpb - api.isc_start_multiple(self._isc_status, self._tr_handle, - cnum, - teb_array - ) + api.isc_start_multiple(self._isc_status, self._tr_handle, cnum, teb_array) if db_api_error(self._isc_status): self._tr_handle = None raise exception_from_status(DatabaseError, self._isc_status, @@ -4029,7 +4087,7 @@ :param boolean retaining: Indicates whether the transactional context of the transaction being resolved should be recycled. - :raises DatabaseError: When error is returned by server as response to commit. + :raises `~fdb.DatabaseError`: When error is returned by server as response to commit. """ if not self.active: return @@ -4059,8 +4117,8 @@ back only as far as the designated savepoint, rather than rolling back entirely. 
Mutually exclusive with 'retaining`. - :raises ProgrammingError: If both `savepoint` and `retaining` are specified. - :raises DatabaseError: When error is returned by server as response to rollback. + :raises `~fdb.ProgrammingError`: If both `savepoint` and `retaining` are specified. + :raises `~fdb.DatabaseError`: When error is returned by server as response to rollback. """ if not self.active: return @@ -4074,8 +4132,7 @@ api.isc_rollback_retaining(self._isc_status, self._tr_handle) else: self.__close_cursors() - api.isc_rollback_transaction(self._isc_status, - self._tr_handle) + api.isc_rollback_transaction(self._isc_status, self._tr_handle) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, "Error while rolling back transaction:") @@ -4119,7 +4176,7 @@ :param string name: Savepoint name. """ self.execute_immediate('SAVEPOINT %s' % name) - def cursor(self,connection = None): + def cursor(self, connection=None): """Creates a new :class:`Cursor` that will operate in the context of this Transaction. @@ -4128,10 +4185,10 @@ returned Cursor should be bound. :type connection: :class:`Connection` instance - :raises ProgrammingError: When transaction operates on multiple `Connections` - and: `connection` parameter is not specified, or - specified `connection` is not among `Connections` - this Transaction is bound to. + :raises `~fdb.ProgrammingError`: When transaction operates on multiple `Connections` + and: `connection` parameter is not specified, or + specified `connection` is not among `Connections` + this Transaction is bound to. """ if len(self._connections) > 1: if not connection: @@ -4158,37 +4215,36 @@ """ # We process request as a sequence of info codes, even if only one code # was supplied by the caller. - requestIsSingleton = isinstance(request, int) - if requestIsSingleton: + request_is_singleton = isinstance(request, int) + if request_is_singleton: request = (request,) results = {} - for infoCode in request: + for info_code in request: # The global().get(...) workaround is here because only recent # versions of FB expose constant isc_info_tra_isolation: - if infoCode == globals().get('isc_info_tra_isolation', -1): - buf = self.transaction_info(infoCode, 's') + if info_code == isc_info_tra_isolation: + buf = self.transaction_info(info_code, 'b') buf = buf[1 + struct.calcsize('h'):] if len(buf) == 1: - results[infoCode] = bytes_to_uint(buf) + results[info_code] = bytes_to_uint(buf) else: # For isolation level isc_info_tra_read_committed, the # first byte indicates the isolation level # (isc_info_tra_read_committed), while the second indicates # the record version flag (isc_info_tra_rec_version or # isc_info_tra_no_rec_version). - isolationLevelByte, recordVersionByte = struct.unpack('cc', - buf) - isolationLevel = bytes_to_uint(isolationLevelByte) - recordVersion = bytes_to_uint(recordVersionByte) - results[infoCode] = (isolationLevel, recordVersion) + isolation_level_byte, record_version_byte = struct.unpack('cc', buf) + isolation_level = bytes_to_uint(isolation_level_byte) + record_version = bytes_to_uint(record_version_byte) + results[info_code] = (isolation_level, record_version) else: # At the time of this writing (2006.02.09), # isc_info_tra_isolation is the only known return value of # isc_transaction_info that's not a simple integer. 
- results[infoCode] = self.transaction_info(infoCode, 'i') + results[info_code] = self.transaction_info(info_code, 'i') - if requestIsSingleton: + if request_is_singleton: return results[request[0]] else: return results @@ -4199,11 +4255,11 @@ :param integer info_code: One from the `isc_info_tra_*` constants. :param result_type: Code for result type. - :type result_type: string 's' or 'i' - :raises ProgrammingError: If transaction is not active. - :raises OperationalError: When result is too large to fit into buffer of - size SHRT_MAX. - :raises InternalError: On unexpected processing condition. + :type result_type: 'b' for binary string or 'i' for integer + :raises `~fdb.ProgrammingError`: If transaction is not active. + :raises `~fdb.OperationalError`: When result is too large to fit into buffer of + size SHRT_MAX. + :raises `~fdb.InternalError`: On unexpected processing condition. :raises ValueError: When illegal result type code is specified. """ self.__check_active() @@ -4212,11 +4268,11 @@ while True: res_buf = int2byte(0) * buf_size api.isc_transaction_info(self._isc_status, self._tr_handle, - len(request_buffer), request_buffer, - len(res_buf), res_buf) + len(request_buffer), request_buffer, + len(res_buf), res_buf) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, - "Error while requesting transaction information:") + "Error while requesting transaction information:") i = buf_size - 1 while i >= 0: if res_buf[i] != mychr(0): @@ -4231,8 +4287,8 @@ continue else: raise OperationalError("Result is too large to fit into" - " buffer of size SHRT_MAX, yet underlying info" - " function only accepts buffers with size <= SHRT_MAX.") + " buffer of size SHRT_MAX, yet underlying info" + " function only accepts buffers with size <= SHRT_MAX.") else: break if ord2(res_buf[i]) != isc_info_end: @@ -4242,11 +4298,10 @@ raise InternalError("Result code does not match request code.") if result_type.upper() == 'I': return bytes_to_int(res_buf[3:3 + bytes_to_int(res_buf[1:3])]) - elif result_type.upper() == 'S': - return p3fix(ctypes.string_at(res_buf, i),self.__python_charset) + elif result_type.upper() == 'B': + return ctypes.string_at(res_buf, i) else: - raise ValueError("Unknown result type requested (must be 'i'" - "or 's').") + raise ValueError("Unknown result type requested (must be 'i' or 'b').") def prepare(self): """Manually triggers the first phase of a two-phase commit (2PC). @@ -4266,7 +4321,7 @@ self.close() def isreadonly(self): "Returns True if transaction is Read Only." - return self.trans_info(isc_info_tra_access) == isc_info_tra_readonly; + return self.trans_info(isc_info_tra_access) == isc_info_tra_readonly #: (Read Only) (int) Internal ID (server-side) for transaction. transaction_id = property(__get_transaction_id) @@ -4279,7 +4334,7 @@ #: (Read/Write) (string) 'commit' or 'rollback', action to be #: taken when physical transaction has to be ended automatically. #: **Default is 'commit'**. - default_action = property(__get_default_action,__set_default_action) + default_action = property(__get_default_action, __set_default_action) #: (Read Only) (int) ID of Oldest Interesting Transaction when this transaction started. oit = property(__get_oit) #: (Read Only) (int) ID of Oldest Active Transaction when this transaction started. 
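For orientation, a minimal sketch that exercises the reworked Transaction API above end to end. Everything concrete here is a placeholder assumption, not part of the package: the DSN, the credentials, and table T with column C1.

    import fdb
    from fdb.ibase import isc_info_tra_id

    con = fdb.connect(dsn='localhost:/tmp/test.fdb', user='sysdba', password='masterkey')
    tr = con.main_transaction                    # Transaction managed by the connection
    tr.begin()
    cur = tr.cursor()                            # Cursor bound to this transaction
    cur.execute("insert into T (C1) values (1)")
    tr.savepoint('SP1')
    cur.execute("insert into T (C1) values (2)")
    tr.rollback(savepoint='SP1')                 # roll back only to SP1; the first insert survives
    print(tr.trans_info(isc_info_tra_id))        # a simple integer item ('i' result type)
    tr.commit()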
@@ -4329,7 +4384,7 @@ def __get_default_tpb(self): return self._default_tpb def __set_default_tpb(self, value): - self._default_tpb = _validateTPB(value) + self._default_tpb = _validate_tpb(value) def disband(self): """Forcefully deletes all connections from connection group. @@ -4357,10 +4412,10 @@ :param con: A :class:`Connection` instance to add to this group. :raises TypeError: When `con` is not :class:`Connection` instance. - :raises ProgrammingError: When `con` is already member of this or another - group, or :attr:`~Connection.closed`. - When this group has unresolved transaction or - contains 16 connections. + :raises `~fdb.ProgrammingError`: When `con` is already a member of this or another + group, or :attr:`~Connection.closed`. + When this group has an unresolved transaction or + contains 16 connections. """ ### CONSTRAINTS ON $con: ### # con must be an instance of kinterbasdb.Connection: @@ -4372,7 +4427,7 @@ # con cannot belong to more than one group at a time: if con.group: raise ProgrammingError("con is already a member of another group;" - " it cannot belong to more than one group at once.") + " it cannot belong to more than one group at once.") # con must be connected to a database; it must not have been closed. if con.closed: raise ProgrammingError("con has been closed; it cannot join a group.") @@ -4384,16 +4439,15 @@ # self cannot accept new members while self has an unresolved # transaction: self.__require_transaction_state(False, - "Cannot add connection to group that has an unresolved transaction.") + "Cannot add connection to group that has an unresolved transaction.") self.__drop_transaction() # self cannot have more than DIST_TRANS_MAX_DATABASES members: if self.count() >= DIST_TRANS_MAX_DATABASES: raise ProgrammingError("The database engine limits the number of" - " database handles that can participate in a single" - " distributed transaction to %d or fewer; this group already" - " has %d members." - % (DIST_TRANS_MAX_DATABASES, self.count()) - ) + " database handles that can participate in a single" + " distributed transaction to %d or fewer; this group already" + " has %d members." + % (DIST_TRANS_MAX_DATABASES, self.count())) ### CONSTRAINTS FINISHED ### # Can't set con.group directly (read-only); must use package-private @@ -4404,24 +4458,22 @@ """Removes specified connection from group. :param con: A :class:`Connection` instance to remove. - :raises ProgrammingError: When `con` doesn't belong to this group or - transaction is active. + :raises `~fdb.ProgrammingError`: When `con` doesn't belong to this group or + transaction is active. """ if con not in self: raise ProgrammingError("con is not a member of this group.") assert con.group is self - self.__require_transaction_state(False, - "Cannot remove connection from group that has an unresolved transaction.") + self.__require_transaction_state(False, "Cannot remove connection from group that has an unresolved transaction.") self.__drop_transaction() con._set_group(None) self._cons.remove(con) def clear(self): """Removes all connections from group. - :raises ProgrammingError: When transaction is active. + :raises `~fdb.ProgrammingError`: When transaction is active. """ - self.__require_transaction_state(False, - "Cannot clear group that has an unresolved transaction.") + self.__require_transaction_state(False, "Cannot clear group that has an unresolved transaction.") self.__drop_transaction() for con in self.members(): self.remove(con) @@ -4434,8 +4486,8 @@ ..
note:: Automatically starts transaction if it's not already started. :param connection: :class:`Connection` instance. - :raises ProgrammingError: When group is empty or specified `connection` - doesn't belong to this group. + :raises `~fdb.ProgrammingError`: When group is empty or specified `connection` + doesn't belong to this group. """ if not self._transaction: self.__require_non_empty_group('start') @@ -4462,15 +4514,12 @@ self._transaction = None def __require_transaction_state(self, must_be_active, err_msg=''): transaction = self._transaction - if ( - (must_be_active and transaction is None) - or (not must_be_active and (transaction is not None and transaction.active)) - ): + if ((must_be_active and transaction is None) or + (not must_be_active and (transaction is not None and transaction.active))): raise ProgrammingError(err_msg) def __require_non_empty_group(self, operation_name): if self.count() == 0: - raise ProgrammingError("Cannot %s distributed transaction with" - " an empty ConnectionGroup." % operation_name) + raise ProgrammingError("Cannot %s distributed transaction with an empty ConnectionGroup." % operation_name) def __ensure_transaction(self): if not self._transaction: self.__require_non_empty_group('start') @@ -4495,7 +4544,7 @@ :param string sql: SQL statement to execute. - :raises DatabaseError: When error is returned from server. + :raises `~fdb.DatabaseError`: When error is returned from server. """ self.__ensure_transaction() self._transaction.execute_immediate(sql) @@ -4506,10 +4555,9 @@ transaction. If not specified, :attr:`default_tpb` is used. :type tpb: :class:`TPB` instance, list/tuple of `isc_tpb_*` constants or `bytestring` - :raises ProgrammingError: When group is empty or has active transaction. + :raises `~fdb.ProgrammingError`: When group is empty or has active transaction. """ - self.__require_transaction_state(False, - "Must resolve current transaction before starting another.") + self.__require_transaction_state(False, "Must resolve current transaction before starting another.") self.__ensure_transaction() self._transaction.begin(tpb) def savepoint(self, name): @@ -4517,7 +4565,7 @@ See :meth:`Transaction.savepoint` for details. :param string name: Name for savepoint. - :raises ProgrammingError: When group is empty. + :raises `~fdb.ProgrammingError`: When group is empty. """ self.__require_non_empty_group('savepoint') return self._transaction.savepoint(name) @@ -4526,7 +4574,7 @@ of this method is optional; if preparation is not triggered manually, it will be performed implicitly by commit() in a 2PC.""" self.__require_non_empty_group('prepare') - self.__require_transaction_state(True,"This group has no transaction to prepare.") + self.__require_transaction_state(True, "This group has no transaction to prepare.") self._transaction.prepare() def commit(self, retaining=False): """Commits distributed transaction over member connections using 2PC. @@ -4539,7 +4587,7 @@ :param boolean retaining: Indicates whether the transactional context of the transaction being resolved should be recycled. - :raises ProgrammingError: When group is empty. + :raises `~fdb.ProgrammingError`: When group is empty. """ self.__require_non_empty_group('commit') # The consensus among Python DB API experts is that transactions should @@ -4559,7 +4607,7 @@ :param boolean retaining: Indicates whether the transactional context of the transaction being resolved should be recycled. - :raises ProgrammingError: When group is empty. + :raises `~fdb.ProgrammingError`: When group is empty. 
""" self.__require_non_empty_group('rollback') # The consensus among Python DB API experts is that transactions should @@ -4598,7 +4646,7 @@ self.__tr_handle = tr_handle self.__is_text = is_text self.__charset = charset - self.__python_charset = charset_map.get(charset,charset) + self.__python_charset = charset_map.get(charset, charset) self.__blobid = blobid self.__opened = False self._blob_handle = isc_blob_handle() @@ -4609,13 +4657,10 @@ if not self.__opened: self.__open() def __open(self): - api.isc_open_blob2(self._isc_status, - self.__db_handle, - self.__tr_handle, - self._blob_handle, self.__blobid, 4, - bs([ibase.isc_bpb_version1, - ibase.isc_bpb_type,1, - ibase.isc_bpb_type_stream])) + api.isc_open_blob2(self._isc_status, self.__db_handle, self.__tr_handle, + self._blob_handle, self.__blobid, 4, + bs([ibase.isc_bpb_version1, ibase.isc_bpb_type, 1, + ibase.isc_bpb_type_stream])) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, @@ -4624,9 +4669,8 @@ result = ctypes.cast(ctypes.create_string_buffer(20), buf_pointer) api.isc_blob_info(self._isc_status, self._blob_handle, 2, - bs([isc_info_blob_total_length, - isc_info_blob_max_segment]), - 20, result) + bs([isc_info_blob_total_length, isc_info_blob_max_segment]), + 20, result) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, @@ -4651,26 +4695,22 @@ self.__buf_data = 0 self.__opened = True def __reset_buffer(self): - ctypes.memset(self.__buf,0,self._segment_size) + ctypes.memset(self.__buf, 0, self._segment_size) self.__buf_pos = 0 self.__buf_data = 0 - def __BLOB_get(self): + def __blob_get(self): self.__reset_buffer() # Load BLOB - allow_incomplete_segment_read = False + allow_incomplete_segment_read = True status = ISC_STATUS(0) bytes_read = 0 bytes_actually_read = ctypes.c_ushort(0) - status = api.isc_get_segment(self._isc_status, - self._blob_handle, - bytes_actually_read, - self._segment_size, - ctypes.byref(self.__buf)) + status = api.isc_get_segment(self._isc_status, self._blob_handle, bytes_actually_read, + self._segment_size, ctypes.byref(self.__buf)) if status != 0: if status == ibase.isc_segstr_eof: self.__buf_data = 0 - elif ((status == isc_segment) - and allow_incomplete_segment_read): + elif (status == isc_segment) and allow_incomplete_segment_read: self.__buf_data = bytes_actually_read.value else: raise exception_from_status(DatabaseError, @@ -4681,7 +4721,7 @@ def close(self): """Closes the Reader. Like :meth:`file.close`. - :raises DatabaseError: When error is returned by server. + :raises `~fdb.DatabaseError`: When error is returned by server. """ if self.__opened and not self.closed: self.__closed = True @@ -4707,14 +4747,14 @@ __next__ = next def __iter__(self): return self - def read(self, size = -1): + def read(self, size=-1): """Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. Like :meth:`file.read`. - :raises ProgrammingError: When reader is closed. + :raises `~fdb.ProgrammingError`: When reader is closed. .. 
note:: @@ -4723,7 +4763,7 @@ """ self.__ensure_open() if size >= 0: - to_read = min(size,self._blob_length - self.__pos) + to_read = min(size, self._blob_length - self.__pos) else: to_read = self._blob_length - self.__pos return_size = to_read @@ -4732,13 +4772,13 @@ while to_read > 0: to_copy = min(to_read, self.__buf_data - self.__buf_pos) if to_copy == 0: - self.__BLOB_get() + self.__blob_get() to_copy = min(to_read, self.__buf_data - self.__buf_pos) if to_copy == 0: # BLOB EOF break - ctypes.memmove(ctypes.byref(result,pos), - ctypes.byref(self.__buf,self.__buf_pos), + ctypes.memmove(ctypes.byref(result, pos), + ctypes.byref(self.__buf, self.__buf_pos), to_copy) pos += to_copy self.__pos += to_copy @@ -4746,7 +4786,7 @@ to_read -= to_copy result = result.raw[:return_size] if (self.__charset or PYTHON_MAJOR_VER == 3) and self.__is_text: - result = b2u(result,self.__python_charset) + result = b2u(result, self.__python_charset) return result def readline(self): """Read one entire line from the file. A trailing newline character is @@ -4754,7 +4794,7 @@ line). An empty string is returned when EOF is encountered immediately. Like :meth:`file.readline`. - :raises ProgrammingError: When reader is closed. + :raises `~fdb.ProgrammingError`: When reader is closed. .. note:: @@ -4769,7 +4809,7 @@ while to_read > 0 and not found: to_scan = min(to_read, self.__buf_data - self.__buf_pos) if to_scan == 0: - self.__BLOB_get() + self.__blob_get() to_scan = min(to_read, self.__buf_data - self.__buf_pos) if to_scan == 0: # BLOB EOF @@ -4782,15 +4822,15 @@ pos += 1 break pos += 1 - result = ctypes.string_at(ctypes.byref(self.__buf,self.__buf_pos), pos) + result = ctypes.string_at(ctypes.byref(self.__buf, self.__buf_pos), pos) if (self.__charset or PYTHON_MAJOR_VER == 3) and self.__is_text: - result = b2u(result,self.__python_charset) + result = b2u(result, self.__python_charset) line.append(result) self.__buf_pos += pos self.__pos += pos to_read -= pos return ''.join(line) - def readlines(self, sizehint = None): + def readlines(self, sizehint=None): """Read until EOF using :meth:`readline` and return a list containing the lines thus read. The optional sizehint argument (if present) is ignored. Like :meth:`file.readlines`. @@ -4806,7 +4846,7 @@ result.append(line) line = self.readline() return result - def seek(self, offset, whence = os.SEEK_SET): + def seek(self, offset, whence=os.SEEK_SET): """Set the file’s current position, like stdio‘s `fseek()`. See :meth:`file.seek` details. @@ -4814,7 +4854,7 @@ :param whence: (Optional) Context for offset. :type whence: os.SEEK_SET, os.SEEK_CUR or os.SEEK_END - :raises ProgrammingError: When reader is closed. + :raises `~fdb.ProgrammingError`: When reader is closed. .. warning:: @@ -4823,9 +4863,9 @@ """ self.__ensure_open() pos = ISC_LONG(0) - api.isc_seek_blob (self._isc_status, - self._blob_handle, - whence, ISC_LONG(offset), ctypes.byref(pos)) + api.isc_seek_blob(self._isc_status, + self._blob_handle, + whence, ISC_LONG(offset), ctypes.byref(pos)) if db_api_error(self._isc_status): raise exception_from_status(DatabaseError, self._isc_status, @@ -4836,6 +4876,55 @@ """Return current position in BLOB, like stdio‘s `ftell()` and :meth:`file.tell`.""" return self.__pos + def get_info(self): + """Return information about BLOB. 
+ + :returns: Tuple with values: blob_length, segment_size, num_segments, blob_type + + Meaning of individual values: + + :blob_length: Total blob length in bytes + :segment_size: Size of largest segment + :num_segments: Number of segments + :blob_type: isc_bpb_type_segmented or isc_bpb_type_stream + """ + self.__ensure_open() + result = ctypes.cast(ctypes.create_string_buffer(30), + buf_pointer) + api.isc_blob_info(self._isc_status, self._blob_handle, 4, + bs([isc_info_blob_total_length, + isc_info_blob_max_segment, + isc_info_blob_num_segments, + isc_info_blob_type]), + 30, result) + if db_api_error(self._isc_status): + raise exception_from_status(DatabaseError, self._isc_status, "Source isc_blob_info failed:") + offset = 0 + while bytes_to_uint(result[offset]) != isc_info_end: + code = bytes_to_uint(result[offset]) + offset += 1 + if code == isc_info_blob_total_length: + length = bytes_to_uint(result[offset:offset + 2]) + blob_length = bytes_to_uint(result[ + offset + 2:offset + 2 + length]) + offset += length + 2 + elif code == isc_info_blob_max_segment: + length = bytes_to_uint(result[offset:offset + 2]) + segment_size = bytes_to_uint(result[ + offset + 2:offset + 2 + length]) + offset += length + 2 + elif code == isc_info_blob_num_segments: + length = bytes_to_uint(result[offset:offset + 2]) + num_segments = bytes_to_uint(result[ + offset + 2:offset + 2 + length]) + offset += length + 2 + elif code == isc_info_blob_type: + length = bytes_to_uint(result[offset:offset + 2]) + blob_type = bytes_to_uint(result[ + offset + 2:offset + 2 + length]) + offset += length + 2 + # + return (blob_length, segment_size, num_segments, blob_type) def __get_closed(self): return self.__closed def __get_mode(self): @@ -4867,39 +4956,38 @@ self._description = description fields = self._fields = {} pos = 0 - for fieldSpec in description: + for field_spec in description: # It's possible for a result set from the database engine to return # multiple fields with the same name, but kinterbasdb's key-based # row interface only honors the first (thus setdefault, which won't # store the position if it's already present in self._fields). - fields.setdefault(fieldSpec[DESCRIPTION_NAME], row[pos]) + fields.setdefault(field_spec[DESCRIPTION_NAME], row[pos]) pos += 1 def __len__(self): return len(self._fields) - def __getitem__(self, fieldName): + def __getitem__(self, field_name): fields = self._fields # Straightforward, unnormalized lookup will work if the fieldName is # already uppercase and/or if it refers to a database field whose # name is case-sensitive. - if fieldName in fields: - return fields[fieldName] + if field_name in fields: + return fields[field_name] else: - fieldNameNormalized = _normalizeDatabaseIdentifier(fieldName) + field_name_normalized = _normalize_db_identifier(field_name) try: - return fields[fieldNameNormalized] + return fields[field_name_normalized] except KeyError: raise KeyError('Result set has no field named "%s". 
The field' ' name must be one of: (%s)' - % (fieldName, ', '.join(fields.keys())) - ) - def get(self, fieldName, defaultValue=None): + % (field_name, ', '.join(fields.keys()))) + def get(self, field_name, default_value=None): try: - return self[fieldName] + return self[field_name] except KeyError: - return defaultValue - def __contains__(self, fieldName): + return default_value + def __contains__(self, field_name): try: - self[fieldName] + self[field_name] except KeyError: return False else: @@ -4908,32 +4996,32 @@ # Return an easily readable dump of this row's field names and their # corresponding values. return '' % ', '.join([ - '%s = %s' % (fieldName, self[fieldName]) - for fieldName in self._fields.keys() + '%s = %s' % (field_name, self[field_name]) + for field_name in self._fields.keys() ]) def keys(self): # Note that this is an *ordered* list of keys. return [fieldSpec[DESCRIPTION_NAME] for fieldSpec in self._description] def values(self): # Note that this is an *ordered* list of values. - return [self[fieldName] for fieldName in self.keys()] + return [self[field_name] for field_name in self.keys()] def items(self): - return [(fieldName, self[fieldName]) for fieldName in self.keys()] + return [(field_name, self[field_name]) for field_name in self.keys()] def iterkeys(self): - for fieldDesc in self._description: - yield fieldDesc[DESCRIPTION_NAME] + for field_desc in self._description: + yield field_desc[DESCRIPTION_NAME] __iter__ = iterkeys def itervalues(self): - for fieldName in self: - yield self[fieldName] + for field_name in self: + yield self[field_name] def iteritems(self): - for fieldName in self: - yield fieldName, self[fieldName] + for field_name in self: + yield field_name, self[field_name] class _TableAccessStats(object): """An internal class that wraps results from :meth:`~fdb.Connection.get_table_access_stats()`""" - def __init__(self,table_id): + def __init__(self, table_id): self.table_id = table_id self.table_name = None self.sequential = None @@ -4944,7 +5032,7 @@ self.backouts = None self.purges = None self.expunges = None - def _set_info(self,info_code,value): + def _set_info(self, info_code, value): if info_code == isc_info_read_seq_count: self.sequential = value elif info_code == isc_info_read_idx_count: @@ -4968,32 +5056,31 @@ def __init__(self, clusterIdentifier=None): self.clear() if clusterIdentifier: - self._addCode(clusterIdentifier) + self._add_code(clusterIdentifier) def render(self): # Convert the RequestBufferBuilder's components to a binary Python str. return b('').join(self._buffer) def clear(self): self._buffer = [] - def _extend(self, otherRequestBuilder): - self._buffer.append(otherRequestBuilder.render()) - def _addRaw(self, rawBuf): - assert isinstance(rawBuf, mybytes) - self._buffer.append(rawBuf) - def _addCode(self, code): + def _extend(self, other_req_builder): + self._buffer.append(other_req_builder.render()) + def _add_raw(self, raw_buf): + assert isinstance(raw_buf, mybytes) + self._buffer.append(raw_buf) + def _add_code(self, code): self._code2reqbuf(self._buffer, code) - def _code2reqbuf(self, reqBuf, code): + def _code2reqbuf(self, req_buf, code): if isinstance(code, str): assert len(code) == 1 code = ord(code) - # The database engine considers little-endian integers "portable"; they # need to have been converted to little-endianness before being sent # across the network. 
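The mapping fetches are backed by _RowMapping above, which behaves like a read-only dictionary with case-normalized keys. A short usage sketch, assuming cur is an open Cursor; the query targets the RDB$DATABASE system table, so it runs on any database:

    cur.execute("select RDB$RELATION_ID, RDB$CHARACTER_SET_NAME from RDB$DATABASE")
    row = cur.fetchonemap()
    print(row['rdb$relation_id'])         # unquoted field names are normalized to upper case
    print(row.get('NO_SUCH_FIELD', 0))    # dict-style get() with a default value
    for name, value in row.items():       # ordered as in the cursor description
        print(name, value)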
- reqBuf.append(struct.pack(' UINT_MAX): + if (not isinstance(lock_timeout, (int, mylong))) or (lock_timeout < 0 or lock_timeout > UINT_MAX): raise ValueError('Lock resolution must be either None' - ' or a non-negative int number of seconds between 0 and' - ' %d.' % UINT_MAX) + ' or a non-negative int number of seconds between 0 and' + ' %d.' % UINT_MAX) self._lock_timeout = lock_timeout #: (integer) Required lock timeout or None. @@ -5136,15 +5218,6 @@ if self._table_reservation is None: self._table_reservation = TableReservation() return self._table_reservation - def _set_table_reservation_access(self, _): - raise ProgrammingError('Instead of changing the value of the' - ' .table_reservation object itself, you must change its *elements*' - ' by manipulating it as though it were a dictionary that mapped' - '\n "TABLE_NAME": (sharingMode, accessMode)' - '\nFor example:' - '\n tpbBuilder.table_reservation["MY_TABLE"] =' - ' (kinterbasdb.isc_tpb_protected, kinterbasdb.isc_tpb_lock_write)' - ) #: (:class:`TableReservation`) Table reservation specification. #: @@ -5157,8 +5230,7 @@ #: .. code-block:: python #: #: tpb.table_reservation["MY_TABLE"] = (fdb.isc_tpb_protected, fdb.isc_tpb_lock_write) - table_reservation = property(_get_table_reservation, - _set_table_reservation_access) + table_reservation = property(_get_table_reservation) class TableReservation(object): @@ -5193,31 +5265,30 @@ return b('') frags = [] _ = frags.append - for tableName, resDefs in self.iteritems(): - tableNameLenWithTerm = len(b(tableName)) + 1 - for (sharingMode, accessMode) in resDefs: - _(int2byte(accessMode)) - _(struct.pack('' frags = ['') return ''.join(frags) def keys(self): @@ -5258,62 +5329,55 @@ else: return self._res.iteritems() def __setitem__(self, key, value): - key = self._validateKey(key) - key = _normalizeDatabaseIdentifier(key) + key = _validateKey(key) + key = _normalize_db_identifier(key) # If the += operator is being applied, the form of value will be like: # [(sharingMode0, accessMode0), ..., newSharingMode, newAccessMode] # For the sake of convenience, we detect this situation and handle it # "naturally". if isinstance(value, list) and len(value) >= 3: - otherValues = value[:-2] + other_values = value[:-2] value = tuple(value[-2:]) else: - otherValues = None - if ( - (not isinstance(value, tuple)) - or len(value) != 2 - or value[0] not in - (isc_tpb_shared, isc_tpb_protected, isc_tpb_exclusive) - or value[1] not in (isc_tpb_lock_read, isc_tpb_lock_write) - ): - raise ValueError('Table reservation entry must be a 2-tuple of' - ' the following form:\n' - 'element 0: sharing mode (one of (isc_tpb_shared,' - ' isc_tpb_protected, isc_tpb_exclusive))\n' - 'element 1: access mode (one of (isc_tpb_lock_read,' - ' isc_tpb_lock_write))\n' - '%s is not acceptable.' 
% str(value) - ) - if otherValues is None: + other_values = None + if ((not isinstance(value, tuple)) + or len(value) != 2 + or value[0] not in (isc_tpb_shared, isc_tpb_protected, isc_tpb_exclusive) + or value[1] not in (isc_tpb_lock_read, isc_tpb_lock_write)): + raise ValueError("""Table reservation entry must be a 2-tuple of the following form: +element 0: sharing mode (one of (isc_tpb_shared, isc_tpb_protected, isc_tpb_exclusive)) +element 1: access mode (one of (isc_tpb_lock_read, isc_tpb_lock_write)) +%s is not acceptable.""" % str(value)) + if other_values is None: value = [value] else: - otherValues.append(value) - value = otherValues + other_values.append(value) + value = other_values self._res[key] = value - def _validateKey(self, key): - ### Todo: verify handling of P version differences, refactor - if PYTHON_MAJOR_VER == 3: - keyMightBeAcceptable = isinstance(key, str) - if keyMightBeAcceptable and isinstance(key, str): - try: - key.encode('ASCII') - except UnicodeEncodeError: - keyMightBeAcceptable = False - if not keyMightBeAcceptable: - raise TypeError('Only str keys are allowed.') - else: - keyMightBeAcceptable = isinstance(key, basestring) - if keyMightBeAcceptable and isinstance(key, unicode): - try: - key = key.encode('ASCII') - except UnicodeEncodeError: - keyMightBeAcceptable = False - if not keyMightBeAcceptable: - raise TypeError('Only str keys are allowed.') - return key +def _validateKey(key): + ### Todo: verify handling of P version differences, refactor + if PYTHON_MAJOR_VER == 3: + acceptable_key = isinstance(key, str) + if acceptable_key and isinstance(key, str): + try: + key.encode('ASCII') + except UnicodeEncodeError: + acceptable_key = False + if not acceptable_key: + raise TypeError('Only str keys are allowed.') + else: + acceptable_key = isinstance(key, basestring) + if acceptable_key and isinstance(key, unicode): + try: + key = key.encode('ASCII') + except UnicodeEncodeError: + acceptable_key = False + if not acceptable_key: + raise TypeError('Only str keys are allowed.') + return key -def _validateTPB(tpb): +def _validate_tpb(tpb): if isinstance(tpb, TPB): # TPB's accessor methods perform their own validation, and its # render method takes care of infrastructural trivia. @@ -5337,7 +5401,7 @@ tpb = isc_tpb_version3 + tpb return tpb -def _normalizeDatabaseIdentifier(ident): +def _normalize_db_identifier(ident): if ident.startswith('"') and ident.endswith('"'): # Quoted name; leave the case of the field name untouched, but # strip the quotes. diff -Nru fdb-1.6.1+dfsg1/fdb/gstat.py fdb-2.0.0/fdb/gstat.py --- fdb-1.6.1+dfsg1/fdb/gstat.py 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/fdb/gstat.py 2018-04-26 14:39:03.000000000 +0000 @@ -0,0 +1,658 @@ +#coding:utf-8 +# +# PROGRAM/MODULE: fdb +# FILE: gstat.py +# DESCRIPTION: Python driver for Firebird - Firebird gstat output processing +# CREATED: 8.11.2017 +# +# Software distributed under the License is distributed AS IS, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. +# See the License for the specific language governing rights +# and limitations under the License. +# +# The Original Code was created by Pavel Cisar +# +# Copyright (c) Pavel Cisar +# and all contributors signed below. +# +# All Rights Reserved. +# Contributor(s): ______________________________________. +# +# See LICENSE.TXT for details. 
+ +from fdb import ParseError +from fdb.utils import ObjectList +import datetime +import weakref +from collections import namedtuple +from locale import LC_ALL, getlocale, setlocale, resetlocale +import sys + +GSTAT_25 = 2 +GSTAT_30 = 3 + +ATTRIBUTES = ['force write', 'no reserve', 'shared cache disabled', + 'active shadow', 'multi-user maintenance', + 'single-user maintenance', 'full shutdown', 'read only', + 'backup lock', 'backup merge', 'wrong backup state'] + +ATTR_FORCE_WRITE = 0 #'force write' +ATTR_NO_RESERVE = 1 #'no reserve' +ATTR_NO_SHARED_CACHE = 2 #'shared cache disabled' +ATTR_ACTIVE_SHADOW = 3 #'active shadow' +ATTR_SHUTDOWN_MULTI = 4 #'multi-user maintenance' +ATTR_SHUTDOWN_SINGLE = 5 #'single-user maintenance' +ATTR_SHUTDOWN_FULL = 6 #'full shutdown' +ATTR_READ_ONLY = 7 #'read only' +ATTR_BACKUP_LOCK = 8 #'backup lock' +ATTR_BACKUP_MERGE = 9 #'backup merge' +ATTR_BACKUP_WRONG = 10 #'wrong backup state %d' + + +FillDistribution = namedtuple('FillDistribution', 'd20,d40,d50,d80,d100') +Encryption = namedtuple('Encryption', 'pages,encrypted,unencrypted') + +def empty_str(str_): + "Return True if string is empty (whitespace doesn't count) or None" + return True if str_ is None else str_.strip() == '' + + +class StatTable(object): + "Statistics for a single database table." + def __init__(self): + #: (str) Table name + self.name = None + #: (int) Table ID + self.table_id = None + #: (int) Primary Pointer Page for table + self.primary_pointer_page = None + #: (int) Index Root Page for table + self.index_root_page = None + #: (float) Average record length + self.avg_record_length = None + #: (int) Total number of records in table + self.total_records = None + #: (float) Average record version length + self.avg_version_length = None + #: (int) Total number of record versions + self.total_versions = None + #: (int) Max number of versions for single record + self.max_versions = None + #: (int) Number of data pages for table + self.data_pages = None + #: (int) Number of data page slots for table + self.data_page_slots = None + #: (float) Average data page fill ratio + self.avg_fill = None + #: (:class:`FillDistribution`) Data page fill distribution statistics + self.distribution = None + #: (:class:`~fdb.utils.ObjectList`) Indices belonging to table + self.indices = [] + +class StatTable3(StatTable): + "Statistics for a single database table (Firebird 3 and above)."
+ def __init__(self): + super(StatTable3, self).__init__() + #: (int) Number of Pointer Pages + self.pointer_pages = None + #: (int) Number of record formats + self.total_formats = None + #: (int) Number of actually used record formats + self.used_formats = None + #: (float) Average length of record fragments + self.avg_fragment_length = None + #: (int) Total number of record fragments + self.total_fragments = None + #: (int) Max number of fragments for single record + self.max_fragments = None + #: (float) Average length of unpacked record + self.avg_unpacked_length = None + #: (float) Record compression ratio + self.compression_ratio = None + #: (int) Number of Primary Data Pages + self.primary_pages = None + #: (int) Number of Secondary Data Pages + self.secondary_pages = None + #: (int) Number of swept data pages + self.swept_pages = None + #: (int) Number of empty data pages + self.empty_pages = None + #: (int) Number of full data pages + self.full_pages = None + #: (int) Number of BLOB values + self.blobs = None + #: (int) Total length of BLOB values (bytes) + self.blobs_total_length = None + #: (int) Number of BLOB pages + self.blob_pages = None + #: (int) Number of Level 0 BLOB values + self.level_0 = None + #: (int) Number of Level 1 BLOB values + self.level_1 = None + #: (int) Number of Level 2 BLOB values + self.level_2 = None + +class StatIndex(object): + "Statistics for a single database index." + def __init__(self, table): + #: (weakref.proxy) Proxy to parent :class:`StatTable` + self.table = weakref.proxy(table) + table.indices.append(weakref.proxy(self)) + #: (str) Index name + self.name = None + #: (int) Index ID + self.index_id = None + #: (int) Depth of index tree + self.depth = None + #: (int) Number of leaf index tree buckets + self.leaf_buckets = None + #: (int) Number of index tree nodes + self.nodes = None + #: (float) Average data length + self.avg_data_length = None + #: (int) Total number of duplicate keys + self.total_dup = None + #: (int) Max number of occurrences for single duplicate key + self.max_dup = None + #: (:class:`FillDistribution`) Index page fill distribution statistics + self.distribution = None + +class StatIndex3(StatIndex): + "Statistics for a single database index (Firebird 3 and above)." + def __init__(self, table): + super(StatIndex3, self).__init__(table) + #: (int) Index Root page + self.root_page = None + #: (float) Average node length + self.avg_node_length = None + #: (float) Average key length + self.avg_key_length = None + #: (float) Index key compression ratio + self.compression_ratio = None + #: (float) Average key prefix length + self.avg_prefix_length = None + #: (float) Index clustering factor + self.clustering_factor = None + #: (float) Ratio + self.ratio = None + +class StatDatabase(object): + """Firebird database statistics (produced by gstat).
+""" + def __init__(self): + #: (int) GSTAT version + self.gstat_version = None + #: (int) System change number (v3 only) + self.system_change_number = None # ver3 + #: (datetime) gstat execution timestamp + self.executed = None + #: (datetime) gstat completion timestamp + self.completed = None # ver3 + #: (str) Database filename + self.filename = None + #: (int) Database flags + self.flags = 0 + #: (int) Checksum (v2 only) + self.checksum = 12345 # ver2 + #: (int) Database header generation + self.generation = 0 + #: (int) Database page size + self.page_size = 0 + #self.ods_version = None + #: (int) Oldest Interesting Transaction + self.oit = 0 + #: (int) Oldest Active Transaction + self.oat = 0 + #: (int) Oldest Snapshot Transaction + self.ost = 0 + #: (int) Next Transaction + self.next_transaction = 0 + #: (int) Bumped Transaction (v2 only) + self.bumped_transaction = None # ver2 + #self.sequence_number = 0 + #: (int) Next attachment ID + self.next_attachment_id = 0 + #: (int) Implementation ID (v2 only) + self.implementation_id = 0 # ver2 + #: (str) Implementation (v3 only) + self.implementation = None # ver3 + #: (int) Number of shadows + self.shadow_count = 0 + #: (int) Number of page buffers + self.page_buffers = 0 + #: (int) Next header page + self.next_header_page = 0 + #: (int) SQL Dialect + self.database_dialect = 0 + #: (datetime) Database creation timestamp + self.creation_date = None + #: (list) Database attributes + self.attributes = [] + # Variable data + #: (int) Sweep interval (variable hdr item) + self.sweep_interval = None + #: (str) Continuation file (variable hdr item) + self.continuation_file = None + #: (int) Last logical page (variable hdr item) + self.last_logical_page = None + #: (str) Backup GUID (variable hdr item) + self.backup_guid = None + #: (str) Root file name (variable hdr item) + self.root_filename = None + #: (str) replay logging file (variable hdr item) + self.replay_logging_file = None + #: (str) Backup difference file (variable hdr item) + self.backup_diff_file = None + #: (Encryption) Stats for enxrypted data page + self.encrypted_data_pages = None + #: (Encryption) Stats for enxrypted index page + self.encrypted_index_pages = None + #: (Encryption) Stats for enxrypted blob page + self.encrypted_blob_pages = None + #: List of database file names + self.continuation_files = [] + #: :class:`~fdb.utils.ObjectList` with :class:`StatTable` or :class:`StatTable3` instances + self.tables = None + #: :class:`~fdb.utils.ObjectList` with :class:`StatIndex` or :class:`StatIndex3` instances + self.indices = None + def has_table_stats(self): + """Return True if instance contains information about tables. + + .. important:: + + This is not the same as check for empty :data:`tables` list. When gstat is run with `-i` without + `-d` option, :data:`tables` list contains instances that does not have any other information about table + but table name and its indices. +""" + return self.tables[0].primary_pointer_page is not None if len(self.tables) > 0 else False + def has_row_stats(self): + "Return True if instance contains information about table rows." + return self.has_table_stats() and self.tables[0].avg_version_length is not None + def has_index_stats(self): + "Return True if instance contains information about indices." + return self.indices[0].depth is not None if len(self.indices) > 0 else False + def has_encryption_stats(self): + "Return True if instance contains information about database encryption." 
+ return self.encrypted_data_pages is not None + def has_system(self): + "Return True if instance contains information about system tables." + return self.tables.contains('RDB$DATABASE', 'item.name') + + +def parse(lines): + """Parse output from Firebird gstat utility. + + :param lines: Iterable of lines produced by Firebird gstat utility. + :returns: :class:`~fdb.gstat.StatDatabase` instance with parsed results. + + :raises `~fdb.ParseError`: When any problem is found in input stream. +""" + def parse_hdr(line): + "Parse line from header" + for key, valtype, name in items_hdr: + if line.startswith(key): + # Check for GSTAT_VERSION + if db.gstat_version is None: + if key == 'Checksum': + db.gstat_version = GSTAT_25 + db.tables = ObjectList(_cls=StatTable, key_expr='item.name') + db.indices = ObjectList(_cls=StatIndex, key_expr='item.name') + elif key == 'System Change Number': + db.gstat_version = GSTAT_30 + db.tables = ObjectList(_cls=StatTable3, key_expr='item.name') + db.indices = ObjectList(_cls=StatIndex3, key_expr='item.name') + # + value = line[len(key):].strip() + if valtype == 'i': # integer + value = int(value) + elif valtype == 's': # string + pass + elif valtype == 'd': # date time + value = datetime.datetime.strptime(value, '%b %d, %Y %H:%M:%S') + elif valtype == 'l': # list + if value == '': + value = [] + else: + value = [x.strip() for x in value.split(',')] + value = tuple([ATTRIBUTES.index(x) for x in value]) + else: + raise ParseError("Unknown value type %s" % valtype) + if name is None: + name = key.lower().replace(' ', '_') + setattr(db, name, value) + return + raise ParseError('Unknown information (line %i)' % line_no) + def parse_var(line): + "Parse line from variable header data" + if line == '*END*': + return + for key, valtype, name in items_var: + if line.startswith(key): + value = line[len(key):].strip() + if valtype == 'i': # integer + value = int(value) + elif valtype == 's': # string + pass + elif valtype == 'd': # date time + value = datetime.datetime.strptime(value, '%b %d, %Y %H:%M:%S') + else: + raise ParseError("Unknown value type %s" % valtype) + if name is None: + name = key.lower().strip(':').replace(' ', '_') + setattr(db, name, value) + return + raise ParseError('Unknown information (line %i)' % line_no) + def parse_fseq(line): + "Parse line from file sequence" + if not line.startswith('File '): + raise ParseError("Bad file specification (line %i)" % line_no) + if 'is the only file' in line: + return + if ' is the ' in line: + db.continuation_files.append(line[5:line.index(' is the ')]) + elif ' continues as' in line: + db.continuation_files.append(line[5:line.index(' continues as')]) + else: + raise ParseError("Bad file specification (line %i)" % line_no) + def parse_table(line, table): + "Parse line from table data" + if table.name is None: + # We should parse header + tname, tid = line.split(' (') + table.name = tname.strip(' "') + table.table_id = int(tid.strip('()')) + else: + if ',' in line: # Data values + for item in line.split(','): + item = item.strip() + found = False + items = items_tbl2 if db.gstat_version == GSTAT_25 else items_tbl3 + for key, valtype, name in items: + if item.startswith(key): + value = item[len(key):].strip() + if valtype == 'i': # integer + value = int(value) + elif valtype == 'f': # float + value = float(value) + elif valtype == 'p': # % + value = int(value.strip('%')) + else: + raise ParseError("Unknown value type %s" % valtype) + if name is None: + name = key.lower().strip(':').replace(' ', '_') + setattr(table, 
name, value) + found = True + break + if not found: + raise ParseError('Unknown information (line %i)' % line_no) + else: # Fill distribution + if '=' in line: + fill_range, fill_value = line.split('=') + i = items_fill.index(fill_range.strip()) + if table.distribution is None: + table.distribution = [0, 0, 0, 0, 0] + table.distribution[i] = int(fill_value.strip()) + elif line.startswith('Fill distribution:'): + pass + else: + raise ParseError('Unknown information (line %i)' % line_no) + def parse_index(line, index): + "Parse line from index data" + if index.name is None: + # We should parse header + iname, iid = line[6:].split(' (') + index.name = iname.strip(' "') + index.index_id = int(iid.strip('()')) + else: + if ',' in line: # Data values + for item in line.split(','): + item = item.strip() + found = False + items = items_idx2 if db.gstat_version == GSTAT_25 else items_idx3 + for key, valtype, name in items: + if item.startswith(key): + value = item[len(key):].strip() + if valtype == 'i': # integer + value = int(value) + elif valtype == 'f': # float + value = float(value) + elif valtype == 'p': # % + value = int(value.strip('%')) + else: + raise ParseError("Unknown value type %s" % valtype) + if name is None: + name = key.lower().strip(':').replace(' ', '_') + setattr(index, name, value) + found = True + break + if not found: + raise ParseError('Unknown information (line %i)' % line_no) + else: # Fill distribution + if '=' in line: + fill_range, fill_value = line.split('=') + i = items_fill.index(fill_range.strip()) + if index.distribution is None: + index.distribution = [0, 0, 0, 0, 0] + index.distribution[i] = int(fill_value.strip()) + elif line.startswith('Fill distribution:'): + pass + else: + raise ParseError('Unknown information (line %i)' % line_no) + def parse_encryption(line): + "Parse line from encryption data" + try: + total, encrypted, unencrypted = line.split(',') + pad, total = total.rsplit(' ', 1) + total = int(total) + pad, encrypted = encrypted.rsplit(' ', 1) + encrypted = int(encrypted) + pad, unencrypted = unencrypted.rsplit(' ', 1) + unencrypted = int(unencrypted) + data = Encryption(total, encrypted, unencrypted) + except: + raise ParseError('Malformed encryption information (line %i)' % line_no) + if 'Data pages:' in line: + db.encrypted_data_pages = data + elif 'Index pages:' in line: + db.encrypted_index_pages = data + elif 'Blob pages:' in line: + db.encrypted_blob_pages = data + else: + raise ParseError('Unknown encryption information (line %i)' % line_no) + + # + items_hdr = [('Flags', 'i', None), + ('Checksum', 'i', None), + ('Generation', 'i', None), + ('System Change Number', 'i', 'system_change_number'), + ('Page size', 'i', None), + ('ODS version', 's', None), + ('Oldest transaction', 'i', 'oit'), + ('Oldest active', 'i', 'oat'), + ('Oldest snapshot', 'i', 'ost'), + ('Next transaction', 'i', None), + ('Bumped transaction', 'i', None), + ('Sequence number', 'i', None), + ('Next attachment ID', 'i', None), + ('Implementation ID', 'i', None), + ('Implementation', 's', None), + ('Shadow count', 'i', None), + ('Page buffers', 'i', None), + ('Next header page', 'i', None), + ('Database dialect', 'i', None), + ('Creation date', 'd', None), + ('Attributes', 'l', None)] + + items_var = [('Sweep interval:', 'i', None), + ('Continuation file:', 's', None), + ('Last logical page:', 'i', None), + ('Database backup GUID:', 's', 'backup_guid'), + ('Root file name:', 's', 'root_filename'), + ('Replay logging file:', 's', None), + ('Backup difference file:', 's', 
'backup_diff_file')] + + items_tbl2 = [('Primary pointer page:', 'i', None), + ('Index root page:', 'i', None), + ('Pointer pages:', 'i', 'pointer_pages'), + ('Average record length:', 'f', 'avg_record_length'), + ('total records:', 'i', None), + ('Average version length:', 'f', 'avg_version_length'), + ('total versions:', 'i', None), + ('max versions:', 'i', None), + ('Data pages:', 'i', None), + ('data page slots:', 'i', None), + ('average fill:', 'p', 'avg_fill'), + ('Primary pages:', 'i', None), + ('secondary pages:', 'i', None), + ('swept pages:', 'i', None), + ('Empty pages:', 'i', None), + ('full pages:', 'i', None)] + + items_tbl3 = [('Primary pointer page:', 'i', None), + ('Index root page:', 'i', None), + ('Total formats:', 'i', None), + ('used formats:', 'i', None), + ('Average record length:', 'f', 'avg_record_length'), + ('total records:', 'i', None), + ('Average version length:', 'f', 'avg_version_length'), + ('total versions:', 'i', None), + ('max versions:', 'i', None), + ('Average fragment length:', 'f', 'avg_fragment_length'), + ('total fragments:', 'i', None), + ('max fragments:', 'i', None), + ('Average unpacked length:', 'f', 'avg_unpacked_length'), + ('compression ratio:', 'f', None), + ('Pointer pages:', 'i', 'pointer_pages'), + ('data page slots:', 'i', None), + ('Data pages:', 'i', None), + ('average fill:', 'p', 'avg_fill'), + ('Primary pages:', 'i', None), + ('secondary pages:', 'i', None), + ('swept pages:', 'i', None), + ('Empty pages:', 'i', None), + ('full pages:', 'i', None), + ('Blobs:', 'i', None), + ('total length:', 'i', 'blobs_total_length'), + ('blob pages:', 'i', None), + ('Level 0:', 'i', None), + ('Level 1:', 'i', None), + ('Level 2:', 'i', None)] + + items_idx2 = [('Depth:', 'i', None), + ('leaf buckets:', 'i', None), + ('nodes:', 'i', None), + ('Average data length:', 'f', 'avg_data_length'), + ('total dup:', 'i', None), + ('max dup:', 'i', None)] + items_idx3 = [('Root page:', 'i', None), + ('depth:', 'i', None), + ('leaf buckets:', 'i', None), + ('nodes:', 'i', None), + ('Average node length:', 'f', 'avg_node_length'), + ('total dup:', 'i', None), + ('max dup:', 'i', None), + ('Average key length:', 'f', 'avg_key_length'), + ('compression ratio:', 'f', None), + ('Average prefix length:', 'f', 'avg_prefix_length'), + ('average data length:', 'f', 'avg_data_length'), + ('Clustering factor:', 'f', None), + ('ratio:', 'f', None)] + + items_fill = ['0 - 19%', '20 - 39%', '40 - 59%', '60 - 79%', '80 - 99%'] + # + db = StatDatabase() + line_no = 0 + table = None + index = None + new_block = True + in_table = False + # + line_no = 0 + step = 0 # Look for sections and skip empty lines + try: + locale = getlocale(LC_ALL) + if sys.platform == 'win32': + setlocale(LC_ALL, 'English_United States') + else: + setlocale(LC_ALL, 'en_US') + # Skip empty lines at start + for line in (x.strip() for x in lines): + line_no += 1 + if line.startswith('Gstat completion time'): + db.completed = datetime.datetime.strptime(line[22:], '%a %b %d %H:%M:%S %Y') + elif step == 0: # Looking for section or db name + if line.startswith('Gstat execution time'): + db.executed = datetime.datetime.strptime(line[21:], '%a %b %d %H:%M:%S %Y') + elif line.startswith('Database header page information:'): + step = 1 + elif line.startswith('Variable header data:'): + step = 2 + elif line.startswith('Database file sequence:'): + step = 3 + elif 'encrypted' in line and 'non-crypted' in line: + parse_encryption(line) + elif line.startswith('Analyzing database pages ...'): + step = 4 + elif 
empty_str(line): + pass + elif line.startswith('Database "'): + x, s = line.split(' ') + db.filename = s.strip('"') + step = 0 + else: + raise ParseError("Unrecognized data (line %i)" % line_no) + elif step == 1: # Header + if empty_str(line): # section ends with empty line + step = 0 + else: + parse_hdr(line) + elif step == 2: # Variable data + if empty_str(line): # section ends with empty line + step = 0 + else: + parse_var(line) + elif step == 3: # File sequence + if empty_str(line): # section ends with empty line + step = 0 + else: + parse_fseq(line) + elif step == 4: # Tables and indices + if empty_str(line): # section ends with empty line + new_block = True + else: + if new_block: + new_block = False + if not line.startswith('Index '): + # Should be table + table = StatTable() if db.gstat_version == GSTAT_25 else StatTable3() + db.tables.append(table) + in_table = True + parse_table(line, table) + else: # It's index + index = StatIndex(table) if db.gstat_version == GSTAT_25 else StatIndex3(table) + db.indices.append(index) + in_table = False + parse_index(line, index) + else: + if in_table: + parse_table(line, table) + else: + parse_index(line, index) + # Final touch + if db.has_table_stats(): + for table in db.tables: + table.distribution = FillDistribution(*table.distribution) + if db.has_index_stats(): + for index in db.indices: + index.distribution = FillDistribution(*index.distribution) + db.tables.freeze() + db.indices.freeze() + finally: + if locale[0] is None: + if sys.platform == 'win32': + setlocale(LC_ALL, '') + else: + resetlocale(LC_ALL) + else: + setlocale(LC_ALL, locale) + return db diff -Nru fdb-1.6.1+dfsg1/fdb/ibase.py fdb-2.0.0/fdb/ibase.py --- fdb-1.6.1+dfsg1/fdb/ibase.py 2016-04-11 10:05:48.000000000 +0000 +++ fdb-2.0.0/fdb/ibase.py 2018-04-26 14:39:03.000000000 +0000 @@ -2,7 +2,7 @@ # # PROGRAM/MODULE: fdb # FILE: ibase.py -# DESCRIPTION: Python ctypes interface to Firebird client library +# DESCRIPTION: Python driver for Firebird - Python ctypes interface to Firebird client library # CREATED: 6.10.2011 # # Software distributed under the License is distributed AS IS, @@ -12,7 +12,7 @@ # # The Original Code was created by Pavel Cisar # -# Copyright (c) 2011 Pavel Cisar +# Copyright (c) Pavel Cisar # and all contributors signed below. # # All Rights Reserved. @@ -21,10 +21,13 @@ # # See LICENSE.TXT for details. 
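Before the ibase.py changes, a usage sketch for the new fdb.gstat module above. The file name is hypothetical; parse() accepts any iterable of lines, such as an open file holding saved output of gstat -h -d -i:

    from fdb import gstat

    with open('employee.gstat') as f:     # hypothetical captured gstat output
        db = gstat.parse(f)
    print(db.gstat_version, db.page_size, db.oit, db.oat)
    if db.has_table_stats():
        for table in db.tables:
            print(table.name, table.total_records, table.avg_fill)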
-from ctypes import * +#from ctypes import * +from ctypes import c_char_p, c_wchar_p, c_char, c_byte, c_ubyte, c_int, c_uint, c_short, c_ushort, \ + c_long, c_ulong, c_longlong, c_ulonglong, c_void_p, c_int8, c_int16, c_int32, c_int64, c_uint8, \ + c_uint16, c_uint32, c_uint64, POINTER, Structure, CFUNCTYPE, CDLL from ctypes.util import find_library import sys -import locale +from locale import getpreferredencoding import types import operator import platform @@ -36,15 +39,15 @@ if PYTHON_MAJOR_VER == 3: from queue import PriorityQueue - def nativestr(st,charset="latin-1"): - if st == None: + def nativestr(st, charset="latin-1"): + if st is None: return st elif isinstance(st, bytes): return st.decode(charset) else: return st - def b(st,charset="latin-1"): - if st == None: + def b(st, charset="latin-1"): + if st is None: return st elif isinstance(st, bytes): return st @@ -57,7 +60,7 @@ def s(st): return st - ord2 = lambda x: x if type(x) == IntType else ord(x) + ord2 = lambda x: x if isinstance(x, IntType) else ord(x) if sys.version_info[1] <= 1: def int2byte(i): @@ -80,18 +83,19 @@ UnicodeType = str TupleType = tuple xrange = range + StringTypes = str else: from Queue import PriorityQueue - def nativestr(st,charset="latin-1"): - if st == None: + def nativestr(st, charset="latin-1"): + if st is None: return st elif isinstance(st, unicode): return st.encode(charset) else: return st - def b(st,charset="latin-1"): - if st == None: + def b(st, charset="latin-1"): + if st is None: return st elif isinstance(st, types.StringType): return st @@ -119,6 +123,8 @@ UnicodeType = types.UnicodeType TupleType = types.TupleType xrange = xrange + StringTypes = (StringType, UnicodeType) + # Support routines from ctypesgen generated file. @@ -131,7 +137,7 @@ # typechecked, and will be converted to c_void_p. def UNCHECKED(type): if (hasattr(type, "_type_") and isinstance(type._type_, str) - and type._type_ != "P"): + and type._type_ != "P"): return type else: return c_void_p @@ -143,14 +149,16 @@ # Event queue operation (and priority) codes -OP_DIE = 1 +OP_DIE = 1 OP_RECORD_AND_REREGISTER = 2 +sys_encoding = getpreferredencoding() + charset_map = { # DB CHAR SET NAME : PYTHON CODEC NAME (CANONICAL) # ------------------------------------------------------------------------- - None : locale.getpreferredencoding(), - 'NONE' : locale.getpreferredencoding(), + None : getpreferredencoding(), + 'NONE' : getpreferredencoding(), 'OCTETS' : None, # Allow to pass through unchanged. 
'UNICODE_FSS' : 'utf_8', 'UTF8' : 'utf_8', # (Firebird 2.0+) @@ -238,8 +246,7 @@ SQL_TYPE_TIME = 560 SQL_TYPE_DATE = 570 SQL_INT64 = 580 -# Firebird 3 -SQL_BOOLEAN = 32764 +SQL_BOOLEAN = 32764 # Firebird 3 SQL_NULL = 32766 SUBTYPE_NUMERIC = 1 @@ -275,9 +282,6 @@ blr_column_name2 = 22 # Added in FB 3.0 blr_bool = 23 -# -blr_domain_type_of = 0 -blr_domain_full = 1 # Rest of BLR is defined in fdb.blr # Database parameter block stuff @@ -286,7 +290,7 @@ isc_dpb_version2 = 2 # Firebird 3 isc_dpb_cdd_pathname = 1 isc_dpb_allocation = 2 -isc_dpb_journal = 3 +#isc_dpb_journal = 3 isc_dpb_page_size = 4 isc_dpb_num_buffers = 5 isc_dpb_buffer_length = 6 @@ -294,14 +298,14 @@ isc_dpb_garbage_collect = 8 isc_dpb_verify = 9 isc_dpb_sweep = 10 -isc_dpb_enable_journal = 11 -isc_dpb_disable_journal = 12 +#isc_dpb_enable_journal = 11 +#isc_dpb_disable_journal = 12 isc_dpb_dbkey_scope = 13 isc_dpb_number_of_users = 14 isc_dpb_trace = 15 isc_dpb_no_garbage_collect = 16 isc_dpb_damaged = 17 -isc_dpb_license = 18 +#isc_dpb_license = 18 isc_dpb_sys_user_name = 19 isc_dpb_encrypt_key = 20 isc_dpb_activate_shadow = 21 @@ -323,13 +327,13 @@ isc_dpb_old_start_page = 37 isc_dpb_old_start_seqno = 38 isc_dpb_old_start_file = 39 -isc_dpb_drop_walfile = 40 +#isc_dpb_drop_walfile = 40 isc_dpb_old_dump_id = 41 -isc_dpb_wal_backup_dir = 42 -isc_dpb_wal_chkptlen = 43 -isc_dpb_wal_numbufs = 44 -isc_dpb_wal_bufsize = 45 -isc_dpb_wal_grp_cmt_wait = 46 +#isc_dpb_wal_backup_dir = 42 +#isc_dpb_wal_chkptlen = 43 +#isc_dpb_wal_numbufs = 44 +#isc_dpb_wal_bufsize = 45 +#isc_dpb_wal_grp_cmt_wait = 46 isc_dpb_lc_messages = 47 isc_dpb_lc_ctype = 48 isc_dpb_cache_manager = 49 @@ -339,7 +343,7 @@ isc_dpb_reserved = 53 isc_dpb_overwrite = 54 isc_dpb_sec_attach = 55 -isc_dpb_disable_wal = 56 +#isc_dpb_disable_wal = 56 isc_dpb_connect_timeout = 57 isc_dpb_dummy_packet_interval = 58 isc_dpb_gbak_attach = 59 @@ -392,92 +396,82 @@ isc_info_req_delete_count = 16 # DB Info item codes -isc_info_db_id = 4 -isc_info_reads = 5 -isc_info_writes = 6 -isc_info_fetches = 7 -isc_info_marks = 8 -isc_info_implementation = 11 -isc_info_isc_version = 12 -isc_info_base_level = 13 # Note: This is useless info item, consider as obsolete +isc_info_db_id = 4 # [db_filename,site_name[,site_name...]] +isc_info_reads = 5 # number of page reads +isc_info_writes = 6 # number of page writes +isc_info_fetches = 7 # number of reads from the memory buffer cache +isc_info_marks = 8 # number of writes to the memory buffer cache +isc_info_implementation = 11 # (implementation code, implementation class) +isc_info_isc_version = 12 # interbase server version identification string +isc_info_base_level = 13 # capability version of the server isc_info_page_size = 14 -isc_info_num_buffers = 15 +isc_info_num_buffers = 15 # number of memory buffers currently allocated isc_info_limbo = 16 -isc_info_current_memory = 17 -isc_info_max_memory = 18 -isc_info_window_turns = 19 -isc_info_license = 20 -isc_info_allocation = 21 -isc_info_attachment_id = 22 -isc_info_read_seq_count = 23 -isc_info_read_idx_count = 24 -isc_info_insert_count = 25 -isc_info_update_count = 26 -isc_info_delete_count = 27 -isc_info_backout_count = 28 -isc_info_purge_count = 29 -isc_info_expunge_count = 30 -isc_info_sweep_interval = 31 -isc_info_ods_version = 32 -isc_info_ods_minor_version = 33 -isc_info_no_reserve = 34 -isc_info_logfile = 35 -isc_info_cur_logfile_name = 36 -isc_info_cur_log_part_offset = 37 -isc_info_num_wal_buffers = 38 -isc_info_wal_buffer_size = 39 -isc_info_wal_ckpt_length = 40 
-isc_info_wal_cur_ckpt_interval = 41 -isc_info_wal_prv_ckpt_fname = 42 -isc_info_wal_prv_ckpt_poffset = 43 -isc_info_wal_recv_ckpt_fname = 44 -isc_info_wal_recv_ckpt_poffset = 45 -isc_info_wal_grpc_wait_usecs = 47 -isc_info_wal_num_io = 48 -isc_info_wal_avg_io_size = 49 -isc_info_wal_num_commits = 50 -isc_info_wal_avg_grpc_size = 51 -isc_info_forced_writes = 52 -isc_info_user_names = 53 -isc_info_page_errors = 54 -isc_info_record_errors = 55 -isc_info_bpage_errors = 56 -isc_info_dpage_errors = 57 -isc_info_ipage_errors = 58 -isc_info_ppage_errors = 59 -isc_info_tpage_errors = 60 -isc_info_set_page_buffers = 61 -isc_info_db_sql_dialect = 62 -isc_info_db_read_only = 63 -isc_info_db_size_in_pages = 64 +isc_info_current_memory = 17 # amount of server memory (in bytes) currently in use +isc_info_max_memory = 18 # maximum amount of memory (in bytes) used at one time since the first process attached to the database +# Obsolete 19-20 +isc_info_allocation = 21 # number of last database page allocated +isc_info_attachment_id = 22 # attachment id number +# all *_count codes below return {[table_id]=operation_count,...}; table IDs are in the system table RDB$RELATIONS. +isc_info_read_seq_count = 23 # number of sequential table scans (row reads) done on each table since the database was last attached +isc_info_read_idx_count = 24 # number of reads done via an index since the database was last attached +isc_info_insert_count = 25 # number of inserts into the database since the database was last attached +isc_info_update_count = 26 # number of database updates since the database was last attached +isc_info_delete_count = 27 # number of database deletes since the database was last attached +isc_info_backout_count = 28 # number of removals of a version of a record +isc_info_purge_count = 29 # number of removals of old versions of fully mature records (records that are committed, so that older ancestor versions are no longer needed) +isc_info_expunge_count = 30 # number of removals of a record and all of its ancestors, for records whose deletions have been committed +isc_info_sweep_interval = 31 # number of transactions that are committed between sweeps to remove database record versions that are no longer needed +isc_info_ods_version = 32 # On-disk structure (ODS) major version number +isc_info_ods_minor_version = 33 # On-disk structure (ODS) minor version number +isc_info_no_reserve = 34 # 20% page space reservation for holding backup versions of modified records: 0=yes, 1=no +# Obsolete 35-51 +isc_info_forced_writes = 52 # mode in which database writes are performed: 0=sync, 1=async +isc_info_user_names = 53 # array of names of all the users currently attached to the database +isc_info_page_errors = 54 # number of page level errors validate found +isc_info_record_errors = 55 # number of record level errors validate found +isc_info_bpage_errors = 56 # number of blob page errors validate found +isc_info_dpage_errors = 57 # number of data page errors validate found +isc_info_ipage_errors = 58 # number of index page errors validate found +isc_info_ppage_errors = 59 # number of pointer page errors validate found +isc_info_tpage_errors = 60 # number of transaction page errors validate found +isc_info_set_page_buffers = 61 # number of memory buffers that should be allocated +isc_info_db_sql_dialect = 62 # dialect of currently attached database +isc_info_db_read_only = 63 # whether the database is read-only (1) or not (0) +isc_info_db_size_in_pages = 64 # number of allocated pages # Values 65-100 unused 
to avoid conflict with InterBase -frb_info_att_charset = 101 -isc_info_db_class = 102 -isc_info_firebird_version = 103 -isc_info_oldest_transaction = 104 -isc_info_oldest_active = 105 -isc_info_oldest_snapshot = 106 -isc_info_next_transaction = 107 -isc_info_db_provider = 108 -isc_info_active_transactions = 109 -isc_info_active_tran_count = 110 -isc_info_creation_date = 111 +frb_info_att_charset = 101 # charset of current attachment +isc_info_db_class = 102 # server architecture +isc_info_firebird_version = 103 # firebird server version identification string +isc_info_oldest_transaction = 104 # ID of oldest transaction +isc_info_oldest_active = 105 # ID of oldest active transaction +isc_info_oldest_snapshot = 106 # ID of oldest snapshot transaction +isc_info_next_transaction = 107 # ID of next transaction +isc_info_db_provider = 108 # for firebird is 'isc_info_db_code_firebird' +isc_info_active_transactions = 109 # array of active transaction IDs +isc_info_active_tran_count = 110 # number of active transactions +isc_info_creation_date = 111 # time_t struct representing database creation date & time isc_info_db_file_size = 112 # added in FB 2.1, nbackup-related - size (in pages) of locked db -fb_info_page_contents = 113 # added in FB 2.5 +fb_info_page_contents = 113 # added in FB 2.5, get raw page contents; takes page_number as parameter # Added in Firebird 3.0 -fb_info_page_warns = 115 -fb_info_record_warns = 116 -fb_info_bpage_warns = 117 -fb_info_dpage_warns = 118 -fb_info_ipage_warns = 119 -fb_info_ppage_warns = 120 -fb_info_tpage_warns = 121 -fb_info_pip_errors = 122 -fb_info_pip_warns = 123 +fb_info_implementation = 114 # (cpu code, OS code, compiler code, flags, implementation class) +fb_info_page_warns = 115 # number of page level warnings validate found +fb_info_record_warns = 116 # number of record level warnings validate found +fb_info_bpage_warns = 117 # number of blob page level warnings validate found +fb_info_dpage_warns = 118 # number of data page level warnings validate found +fb_info_ipage_warns = 119 # number of index page level warnings validate found +fb_info_ppage_warns = 120 # number of pointer page level warnings validate found +fb_info_tpage_warns = 121 # number of transaction page level warnings validate found +fb_info_pip_errors = 122 # number of pip page level errors validate found +fb_info_pip_warns = 123 # number of pip page level warnings validate found isc_info_db_last_value = (fb_info_pip_warns + 1) isc_info_version = isc_info_isc_version +# flags set in fb_info_crypt_state +fb_info_crypt_encrypted = 0x01 +fb_info_crypt_process = 0x02 + # Blob information items isc_info_blob_num_segments = 4 isc_info_blob_max_segment = 5 @@ -486,14 +480,15 @@ # Transaction information items -isc_info_tra_id = 4 -isc_info_tra_oldest_interesting = 5 -isc_info_tra_oldest_snapshot = 6 -isc_info_tra_oldest_active = 7 -isc_info_tra_isolation = 8 -isc_info_tra_access = 9 -isc_info_tra_lock_timeout = 10 -fb_info_tra_dbpath = 11 # Firebird 3.0 +isc_info_tra_id = 4 # current tran ID number +isc_info_tra_oldest_interesting = 5 # oldest interesting tran ID when current tran started +isc_info_tra_oldest_snapshot = 6 # min. 
tran ID of tra_oldest_active +isc_info_tra_oldest_active = 7 # oldest active tran ID when current tran started +isc_info_tra_isolation = 8 # pair: {one of isc_info_tra_isolation_flags, [one of isc_info_tra_read_committed_flags]} +isc_info_tra_access = 9 # 'isc_info_tra_readonly' or 'isc_info_tra_readwrite' +isc_info_tra_lock_timeout = 10 # lock timeout value +# Firebird 3.0 +fb_info_tra_dbpath = 11 # db filename for current transaction # isc_info_tra_isolation responses isc_info_tra_consistency = 1 @@ -577,33 +572,32 @@ isc_tpb_lock_timeout = 21 # BLOB parameter buffer - -isc_bpb_version1 = 1 -isc_bpb_source_type = 1 -isc_bpb_target_type = 2 -isc_bpb_type = 3 -isc_bpb_source_interp = 4 -isc_bpb_target_interp = 5 -isc_bpb_filter_parameter = 6 +isc_bpb_version1 = 1 +isc_bpb_source_type = 1 +isc_bpb_target_type = 2 +isc_bpb_type = 3 +isc_bpb_source_interp = 4 +isc_bpb_target_interp = 5 +isc_bpb_filter_parameter = 6 # Added in FB 2.1 -isc_bpb_storage = 7 +isc_bpb_storage = 7 -isc_bpb_type_segmented = 0 -isc_bpb_type_stream = 1 +isc_bpb_type_segmented = 0 +isc_bpb_type_stream = 1 # Added in FB 2.1 -isc_bpb_storage_main = 0 -isc_bpb_storage_temp = 2 +isc_bpb_storage_main = 0 +isc_bpb_storage_temp = 2 # BLOB codes -isc_segment = 335544366 +isc_segment = 335544366 isc_segstr_eof = 335544367 # Services API # Service parameter block stuff isc_spb_current_version = 2 isc_spb_version = isc_spb_current_version -isc_spb_version3 = 3 +isc_spb_version3 = 3 # Firebird 3.0 isc_spb_user_name = isc_dpb_user_name isc_spb_sys_user_name = isc_dpb_sys_user_name isc_spb_sys_user_name_enc = isc_dpb_sys_user_name_enc @@ -621,7 +615,7 @@ # Added in FB 2.5 isc_spb_trusted_role = 113 # Added in FB 3.0 -isc_spb_verbint = 114 +isc_spb_verbint = 114 isc_spb_auth_block = 115 isc_spb_auth_plugin_name = 116 isc_spb_auth_plugin_list = 117 @@ -632,9 +626,9 @@ isc_spb_os_user = 122 isc_spb_config = 123 isc_spb_expected_db = 124 +# This will not be used in protocol 13, therefore may be reused isc_spb_specific_auth_data = isc_spb_trusted_auth - # Service action items isc_action_svc_backup = 1 # Starts database backup process on the server isc_action_svc_restore = 2 # Starts database restore process on the server @@ -680,6 +674,8 @@ isc_info_svc_limbo_trans = 66 # Retrieve the limbo transactions */ isc_info_svc_running = 67 # Checks to see if a service is running on an attachment */ isc_info_svc_get_users = 68 # Returns the user information from isc_action_svc_display_users */ +isc_info_svc_auth_block = 69 # FB 3.0: Sets authentication block for service query() call */ +isc_info_svc_stdin = 78 # Returns maximum size of data, needed as stdin for service */ # Parameters for isc_action_{add|del|mod|disp)_user isc_spb_sec_userid = 5 @@ -697,7 +693,7 @@ isc_spb_bkp_factor = 6 isc_spb_bkp_length = 7 isc_spb_bkp_skip_data = 8 # Firebird 3.0 -isc_spb_bkp_stat = 15 # Firebird 3.0 +isc_spb_bkp_stat = 15 # Firebird 2.5 isc_spb_bkp_ignore_checksums = 0x01 isc_spb_bkp_ignore_limbo = 0x02 isc_spb_bkp_metadata_only = 0x04 @@ -775,6 +771,9 @@ isc_spb_tra_id_64 = 46 isc_spb_single_tra_id_64 = 47 isc_spb_multi_tra_id_64 = 48 +isc_spb_rpr_commit_trans_64 = 49 +isc_spb_rpr_rollback_trans_64 = 50 +isc_spb_rpr_recover_two_phase_64 = 51 isc_spb_rpr_validate_db = 0x01 isc_spb_rpr_sweep_db = 0x02 @@ -917,7 +916,6 @@ isc_info_db_impl_linux_arm64 = 84 isc_info_db_impl_linux_ppc64el = 85 isc_info_db_impl_linux_ppc64 = 86 # Firebird 3.0 - isc_info_db_impl_last_value = (isc_info_db_impl_linux_ppc64 + 1) # Info DB provider @@ -942,8 +940,124 @@ 
isc_info_db_class_server_access = 14 isc_info_db_class_last_value = (isc_info_db_class_server_access+1) -# status codes -isc_segment = 335544366 +# Request information items +isc_info_number_messages = 4 +isc_info_max_message = 5 +isc_info_max_send = 6 +isc_info_max_receive = 7 +isc_info_state = 8 +isc_info_message_number = 9 +isc_info_message_size = 10 +isc_info_request_cost = 11 +isc_info_access_path = 12 +isc_info_req_select_count = 13 +isc_info_req_insert_count = 14 +isc_info_req_update_count = 15 +isc_info_req_delete_count = 16 + +# Access path items +isc_info_rsb_end = 0 +isc_info_rsb_begin = 1 +isc_info_rsb_type = 2 +isc_info_rsb_relation = 3 +isc_info_rsb_plan = 4 + +# RecordSource (RSB) types +isc_info_rsb_unknown = 1 +isc_info_rsb_indexed = 2 +isc_info_rsb_navigate = 3 +isc_info_rsb_sequential = 4 +isc_info_rsb_cross = 5 +isc_info_rsb_sort = 6 +isc_info_rsb_first = 7 +isc_info_rsb_boolean = 8 +isc_info_rsb_union = 9 +isc_info_rsb_aggregate = 10 +isc_info_rsb_merge = 11 +isc_info_rsb_ext_sequential = 12 +isc_info_rsb_ext_indexed = 13 +isc_info_rsb_ext_dbkey = 14 +isc_info_rsb_left_cross = 15 +isc_info_rsb_select = 16 +isc_info_rsb_sql_join = 17 +isc_info_rsb_simulate = 18 +isc_info_rsb_sim_cross = 19 +isc_info_rsb_once = 20 +isc_info_rsb_procedure = 21 +isc_info_rsb_skip = 22 +isc_info_rsb_virt_sequential = 23 +isc_info_rsb_recursive = 24 +# Firebird 3.0 +isc_info_rsb_window = 25 +isc_info_rsb_singular = 26 +isc_info_rsb_writelock = 27 +isc_info_rsb_buffer = 28 +isc_info_rsb_hash = 29 + +# Bitmap expressions +isc_info_rsb_and = 1 +isc_info_rsb_or = 2 +isc_info_rsb_dbkey = 3 +isc_info_rsb_index = 4 + +isc_info_req_active = 2 +isc_info_req_inactive = 3 +isc_info_req_send = 4 +isc_info_req_receive = 5 +isc_info_req_select = 6 +isc_info_req_sql_stall = 7 + +# Blob Subtypes +isc_blob_untyped = 0 +# internal subtypes +isc_blob_text = 1 +isc_blob_blr = 2 +isc_blob_acl = 3 +isc_blob_ranges = 4 +isc_blob_summary = 5 +isc_blob_format = 6 +isc_blob_tra = 7 +isc_blob_extfile = 8 +isc_blob_debug_info = 9 +isc_blob_max_predefined_subtype = 10 + +# Masks for fb_shutdown_callback +fb_shut_confirmation = 1 +fb_shut_preproviders = 2 +fb_shut_postproviders = 4 +fb_shut_finish = 8 +fb_shut_exit = 16 # Firebird 3.0 + +# Shutdown reasons, used by engine +# Users should provide positive values +fb_shutrsn_svc_stopped = -1 +fb_shutrsn_no_connection = -2 +fb_shutrsn_app_stopped = -3 +fb_shutrsn_device_removed = -4 # Not used by FB 3.0 +fb_shutrsn_signal = -5 +fb_shutrsn_services = -6 +fb_shutrsn_exit_called = -7 + +# Cancel types for fb_cancel_operation +fb_cancel_disable = 1 +fb_cancel_enable = 2 +fb_cancel_raise = 3 +fb_cancel_abort = 4 + +# Debug information items +fb_dbg_version = 1 +fb_dbg_end = 255 +fb_dbg_map_src2blr = 2 +fb_dbg_map_varname = 3 +fb_dbg_map_argument = 4 +# Firebird 3.0 +fb_dbg_subproc = 5 +fb_dbg_subfunc = 6 +fb_dbg_map_curname = 7 + +# sub code for fb_dbg_map_argument +fb_dbg_arg_input = 0 +fb_dbg_arg_output = 1 FB_API_HANDLE = c_uint if platform.architecture() == ('64bit', 'WindowsPE'): @@ -1154,9 +1268,6 @@ XSQLDA_PTR = POINTER(XSQLDA) -def portable_int (buf): - pass - class USER_SEC_DATA(Structure): pass USER_SEC_DATA._fields_ = [ @@ -1226,12 +1337,11 @@ size_t = c_ulong uintmax_t = c_ulong - class fbclient_API(object): """Firebird Client API interface object. Loads Firebird Client Library and exposes API functions as member methods. Uses :ref:`ctypes ` for bindings. 
""" - def __init__(self,fb_library_name=None): + def __init__(self, fb_library_name=None): def get_key(key, sub_key): try: @@ -1255,12 +1365,12 @@ # try find via installed Firebird server key = get_key(winreg.HKEY_LOCAL_MACHINE, - 'SOFTWARE\Firebird Project\Firebird Server\Instances') + 'SOFTWARE\\Firebird Project\\Firebird Server\\Instances') if not key: key = get_key(winreg.HKEY_LOCAL_MACHINE, - 'SOFTWARE\Wow6432Node\Firebird Project\Firebird Server\Instances') + 'SOFTWARE\\Wow6432Node\\Firebird Project\\Firebird Server\\Instances') if key: - instFold = winreg.QueryValueEx(key,'DefaultInstance') + instFold = winreg.QueryValueEx(key, 'DefaultInstance') fb_library_name = os.path.join(os.path.join(instFold[0], 'bin'), 'fbclient.dll') else: fb_library_name = find_library('fbclient') @@ -1274,198 +1384,206 @@ if not fb_library_name: raise Exception("The location of Firebird Client Library could not be determined.") elif not os.path.exists(fb_library_name): - raise Exception("Firebird Client Library '%s' not found" % fb_library_name) + path, file_name = os.path.split(fb_library_name) + file_name = find_library(file_name) + if not file_name: + raise Exception("Firebird Client Library '%s' not found" % fb_library_name) + else: + fb_library_name = file_name if sys.platform in ['win32', 'cygwin', 'os2', 'os2emx']: + from ctypes import WinDLL fb_library = WinDLL(fb_library_name) else: fb_library = CDLL(fb_library_name) + self.client_library = fb_library + #: isc_attach_database(POINTER(ISC_STATUS), c_short, STRING, POINTER(isc_db_handle), c_short, STRING) self.isc_attach_database = fb_library.isc_attach_database self.isc_attach_database.restype = ISC_STATUS self.isc_attach_database.argtypes = [POINTER(ISC_STATUS), c_short, STRING, POINTER(isc_db_handle), c_short, STRING] - + #: isc_array_gen_sdl(POINTER(ISC_STATUS), POINTER(ISC_ARRAY_DESC), POINTER(ISC_SHORT), POINTER(ISC_UCHAR), POINTER(ISC_SHORT)) self.isc_array_gen_sdl = fb_library.isc_array_gen_sdl self.isc_array_gen_sdl.restype = ISC_STATUS self.isc_array_gen_sdl.argtypes = [POINTER(ISC_STATUS), POINTER(ISC_ARRAY_DESC), POINTER(ISC_SHORT), POINTER(ISC_UCHAR), POINTER(ISC_SHORT)] - + #: isc_array_get_slice(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(ISC_QUAD), POINTER(ISC_ARRAY_DESC), c_void_p, POINTER(ISC_LONG)) self.isc_array_get_slice = fb_library.isc_array_get_slice self.isc_array_get_slice.restype = ISC_STATUS self.isc_array_get_slice.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(ISC_QUAD), POINTER(ISC_ARRAY_DESC), c_void_p, POINTER(ISC_LONG)] - + #: isc_array_lookup_bounds(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), STRING, STRING, POINTER(ISC_ARRAY_DESC)) self.isc_array_lookup_bounds = fb_library.isc_array_lookup_bounds self.isc_array_lookup_bounds.restype = ISC_STATUS self.isc_array_lookup_bounds.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), STRING, STRING, POINTER(ISC_ARRAY_DESC)] - + #: isc_array_lookup_desc(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), STRING, STRING, POINTER(ISC_ARRAY_DESC)) self.isc_array_lookup_desc = fb_library.isc_array_lookup_desc self.isc_array_lookup_desc.restype = ISC_STATUS self.isc_array_lookup_desc.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), STRING, STRING, POINTER(ISC_ARRAY_DESC)] - + #: isc_array_set_desc(POINTER(ISC_STATUS), STRING, STRING, POINTER(c_short), POINTER(c_short), POINTER(c_short), 
POINTER(ISC_ARRAY_DESC)) self.isc_array_set_desc = fb_library.isc_array_set_desc self.isc_array_set_desc.restype = ISC_STATUS self.isc_array_set_desc.argtypes = [POINTER(ISC_STATUS), STRING, STRING, POINTER(c_short), POINTER(c_short), POINTER(c_short), POINTER(ISC_ARRAY_DESC)] - + #: isc_array_put_slice(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(ISC_QUAD), POINTER(ISC_ARRAY_DESC), c_void_p, POINTER(ISC_LONG)) self.isc_array_put_slice = fb_library.isc_array_put_slice self.isc_array_put_slice.restype = ISC_STATUS self.isc_array_put_slice.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(ISC_QUAD), POINTER(ISC_ARRAY_DESC), c_void_p, POINTER(ISC_LONG)] - + #: isc_blob_default_desc(POINTER(ISC_BLOB_DESC), POINTER(ISC_UCHAR), POINTER(ISC_UCHAR)) self.isc_blob_default_desc = fb_library.isc_blob_default_desc self.isc_blob_default_desc.restype = None self.isc_blob_default_desc.argtypes = [POINTER(ISC_BLOB_DESC), POINTER(ISC_UCHAR), POINTER(ISC_UCHAR)] - + #: isc_blob_gen_bpb(POINTER(ISC_STATUS), POINTER(ISC_BLOB_DESC), POINTER(ISC_BLOB_DESC), c_ushort, POINTER(ISC_UCHAR), POINTER(c_ushort)) self.isc_blob_gen_bpb = fb_library.isc_blob_gen_bpb self.isc_blob_gen_bpb.restype = ISC_STATUS self.isc_blob_gen_bpb.argtypes = [POINTER(ISC_STATUS), POINTER(ISC_BLOB_DESC), POINTER(ISC_BLOB_DESC), c_ushort, POINTER(ISC_UCHAR), POINTER(c_ushort)] - + #: isc_blob_info(POINTER(ISC_STATUS), POINTER(isc_blob_handle), c_short, STRING, c_short, POINTER(c_char)) self.isc_blob_info = fb_library.isc_blob_info self.isc_blob_info.restype = ISC_STATUS self.isc_blob_info.argtypes = [POINTER(ISC_STATUS), POINTER(isc_blob_handle), c_short, STRING, c_short, POINTER(c_char)] - + #: isc_blob_lookup_desc(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(ISC_UCHAR), POINTER(ISC_UCHAR), POINTER(ISC_BLOB_DESC), POINTER(ISC_UCHAR)) self.isc_blob_lookup_desc = fb_library.isc_blob_lookup_desc self.isc_blob_lookup_desc.restype = ISC_STATUS self.isc_blob_lookup_desc.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(ISC_UCHAR), POINTER(ISC_UCHAR), POINTER(ISC_BLOB_DESC), POINTER(ISC_UCHAR)] - + #: isc_blob_set_desc(POINTER(ISC_STATUS), POINTER(ISC_UCHAR), POINTER(ISC_UCHAR), c_short, c_short, c_short, POINTER(ISC_BLOB_DESC)) self.isc_blob_set_desc = fb_library.isc_blob_set_desc self.isc_blob_set_desc.restype = ISC_STATUS self.isc_blob_set_desc.argtypes = [POINTER(ISC_STATUS), POINTER(ISC_UCHAR), POINTER(ISC_UCHAR), c_short, c_short, c_short, POINTER(ISC_BLOB_DESC)] - + #: isc_cancel_blob(POINTER(ISC_STATUS), POINTER(isc_blob_handle)) self.isc_cancel_blob = fb_library.isc_cancel_blob self.isc_cancel_blob.restype = ISC_STATUS self.isc_cancel_blob.argtypes = [POINTER(ISC_STATUS), POINTER(isc_blob_handle)] - + #: isc_cancel_events(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(ISC_LONG)) self.isc_cancel_events = fb_library.isc_cancel_events self.isc_cancel_events.restype = ISC_STATUS self.isc_cancel_events.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(ISC_LONG)] - + #: isc_close_blob(POINTER(ISC_STATUS), POINTER(isc_blob_handle)) self.isc_close_blob = fb_library.isc_close_blob self.isc_close_blob.restype = ISC_STATUS self.isc_close_blob.argtypes = [POINTER(ISC_STATUS), POINTER(isc_blob_handle)] - + #: isc_commit_retaining(POINTER(ISC_STATUS), POINTER(isc_tr_handle)) self.isc_commit_retaining = fb_library.isc_commit_retaining self.isc_commit_retaining.restype = ISC_STATUS 
self.isc_commit_retaining.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle)] - + #: isc_commit_transaction(POINTER(ISC_STATUS), POINTER(isc_tr_handle)) self.isc_commit_transaction = fb_library.isc_commit_transaction self.isc_commit_transaction.restype = ISC_STATUS self.isc_commit_transaction.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle)] - + #: isc_create_blob(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(isc_blob_handle), POINTER(ISC_QUAD)) self.isc_create_blob = fb_library.isc_create_blob self.isc_create_blob.restype = ISC_STATUS self.isc_create_blob.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(isc_blob_handle), POINTER(ISC_QUAD)] - + #: isc_create_blob2(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(isc_blob_handle), POINTER(ISC_QUAD), c_short, STRING) self.isc_create_blob2 = fb_library.isc_create_blob2 self.isc_create_blob2.restype = ISC_STATUS self.isc_create_blob2.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(isc_blob_handle), POINTER(ISC_QUAD), c_short, STRING] - + #: isc_create_database(POINTER(ISC_STATUS), c_short, STRING, POINTER(isc_db_handle), c_short, STRING, c_short) self.isc_create_database = fb_library.isc_create_database self.isc_create_database.restype = ISC_STATUS self.isc_create_database.argtypes = [POINTER(ISC_STATUS), c_short, STRING, POINTER(isc_db_handle), c_short, STRING, c_short] - + #: isc_database_info(POINTER(ISC_STATUS), POINTER(isc_db_handle), c_short, STRING, c_short, STRING) self.isc_database_info = fb_library.isc_database_info self.isc_database_info.restype = ISC_STATUS self.isc_database_info.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), c_short, STRING, c_short, STRING] - + #: isc_decode_date(POINTER(ISC_QUAD), c_void_p) self.isc_decode_date = fb_library.isc_decode_date self.isc_decode_date.restype = None self.isc_decode_date.argtypes = [POINTER(ISC_QUAD), c_void_p] - + #: isc_decode_sql_date(POINTER(ISC_DATE), c_void_p) self.isc_decode_sql_date = fb_library.isc_decode_sql_date self.isc_decode_sql_date.restype = None self.isc_decode_sql_date.argtypes = [POINTER(ISC_DATE), c_void_p] - + #: isc_decode_sql_time(POINTER(ISC_TIME), c_void_p) self.isc_decode_sql_time = fb_library.isc_decode_sql_time self.isc_decode_sql_time.restype = None self.isc_decode_sql_time.argtypes = [POINTER(ISC_TIME), c_void_p] - + #: isc_decode_timestamp(POINTER(ISC_TIMESTAMP), c_void_p) self.isc_decode_timestamp = fb_library.isc_decode_timestamp self.isc_decode_timestamp.restype = None self.isc_decode_timestamp.argtypes = [POINTER(ISC_TIMESTAMP), c_void_p] - + #: isc_detach_database(POINTER(ISC_STATUS), POINTER(isc_db_handle)) self.isc_detach_database = fb_library.isc_detach_database self.isc_detach_database.restype = ISC_STATUS self.isc_detach_database.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle)] - + #: isc_drop_database(POINTER(ISC_STATUS), POINTER(isc_db_handle)) self.isc_drop_database = fb_library.isc_drop_database self.isc_drop_database.restype = ISC_STATUS self.isc_drop_database.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle)] - + #: isc_dsql_allocate_statement(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_stmt_handle)) self.isc_dsql_allocate_statement = fb_library.isc_dsql_allocate_statement self.isc_dsql_allocate_statement.restype = ISC_STATUS self.isc_dsql_allocate_statement.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_stmt_handle)] - + #: 
isc_dsql_alloc_statement2(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_stmt_handle)) self.isc_dsql_alloc_statement2 = fb_library.isc_dsql_alloc_statement2 self.isc_dsql_alloc_statement2.restype = ISC_STATUS self.isc_dsql_alloc_statement2.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_stmt_handle)] - + #: isc_dsql_describe(POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA)) self.isc_dsql_describe = fb_library.isc_dsql_describe self.isc_dsql_describe.restype = ISC_STATUS self.isc_dsql_describe.argtypes = [POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA)] - + #: isc_dsql_describe_bind(POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA)) self.isc_dsql_describe_bind = fb_library.isc_dsql_describe_bind self.isc_dsql_describe_bind.restype = ISC_STATUS self.isc_dsql_describe_bind.argtypes = [POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA)] - + #: isc_dsql_exec_immed2(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_ushort, STRING, c_ushort, POINTER(XSQLDA), POINTER(XSQLDA)) self.isc_dsql_exec_immed2 = fb_library.isc_dsql_exec_immed2 self.isc_dsql_exec_immed2.restype = ISC_STATUS self.isc_dsql_exec_immed2.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_ushort, STRING, c_ushort, POINTER(XSQLDA), POINTER(XSQLDA)] - + #: isc_dsql_execute(POINTER(ISC_STATUS), POINTER(isc_tr_handle), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA)) self.isc_dsql_execute = fb_library.isc_dsql_execute self.isc_dsql_execute.restype = ISC_STATUS self.isc_dsql_execute.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA)] - + #: isc_dsql_execute2(POINTER(ISC_STATUS), POINTER(isc_tr_handle), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA), POINTER(XSQLDA)) self.isc_dsql_execute2 = fb_library.isc_dsql_execute2 self.isc_dsql_execute2.restype = ISC_STATUS self.isc_dsql_execute2.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA), POINTER(XSQLDA)] - + #: isc_dsql_execute_immediate(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_ushort, STRING, c_ushort, POINTER(XSQLDA)) self.isc_dsql_execute_immediate = fb_library.isc_dsql_execute_immediate self.isc_dsql_execute_immediate.restype = ISC_STATUS self.isc_dsql_execute_immediate.argtypes = [POINTER(ISC_STATUS), @@ -1473,85 +1591,85 @@ POINTER(isc_tr_handle), c_ushort, STRING, c_ushort, POINTER(XSQLDA)] - + #: isc_dsql_fetch(POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA)) self.isc_dsql_fetch = fb_library.isc_dsql_fetch self.isc_dsql_fetch.restype = ISC_STATUS self.isc_dsql_fetch.argtypes = [POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA)] - + #: isc_dsql_finish(POINTER(isc_db_handle)) self.isc_dsql_finish = fb_library.isc_dsql_finish self.isc_dsql_finish.restype = ISC_STATUS self.isc_dsql_finish.argtypes = [POINTER(isc_db_handle)] - + #: isc_dsql_free_statement(POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort) self.isc_dsql_free_statement = fb_library.isc_dsql_free_statement self.isc_dsql_free_statement.restype = ISC_STATUS self.isc_dsql_free_statement.argtypes = [POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort] - + #: isc_dsql_insert(POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA)) self.isc_dsql_insert = fb_library.isc_dsql_insert self.isc_dsql_insert.restype = ISC_STATUS 
self.isc_dsql_insert.argtypes = [POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, POINTER(XSQLDA)] - + #: isc_dsql_prepare(POINTER(ISC_STATUS), POINTER(isc_tr_handle), POINTER(isc_stmt_handle), c_ushort, STRING, c_ushort, POINTER(XSQLDA)) self.isc_dsql_prepare = fb_library.isc_dsql_prepare self.isc_dsql_prepare.restype = ISC_STATUS self.isc_dsql_prepare.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), POINTER(isc_stmt_handle), c_ushort, STRING, c_ushort, POINTER(XSQLDA)] - + #: isc_dsql_set_cursor_name(POINTER(ISC_STATUS), POINTER(isc_stmt_handle), STRING, c_ushort) self.isc_dsql_set_cursor_name = fb_library.isc_dsql_set_cursor_name self.isc_dsql_set_cursor_name.restype = ISC_STATUS self.isc_dsql_set_cursor_name.argtypes = [POINTER(ISC_STATUS), POINTER(isc_stmt_handle), STRING, c_ushort] - + #: isc_dsql_sql_info(POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_short, STRING, c_short, STRING) self.isc_dsql_sql_info = fb_library.isc_dsql_sql_info self.isc_dsql_sql_info.restype = ISC_STATUS self.isc_dsql_sql_info.argtypes = [POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_short, STRING, c_short, STRING] - + #: isc_encode_date(c_void_p, POINTER(ISC_QUAD)) self.isc_encode_date = fb_library.isc_encode_date self.isc_encode_date.restype = None self.isc_encode_date.argtypes = [c_void_p, POINTER(ISC_QUAD)] - + #: isc_encode_sql_date(c_void_p, POINTER(ISC_DATE)) self.isc_encode_sql_date = fb_library.isc_encode_sql_date self.isc_encode_sql_date.restype = None self.isc_encode_sql_date.argtypes = [c_void_p, POINTER(ISC_DATE)] - + #: isc_encode_sql_time(c_void_p, POINTER(ISC_TIME)) self.isc_encode_sql_time = fb_library.isc_encode_sql_time self.isc_encode_sql_time.restype = None self.isc_encode_sql_time.argtypes = [c_void_p, POINTER(ISC_TIME)] - + #: isc_encode_timestamp(c_void_p, POINTER(ISC_TIMESTAMP)) self.isc_encode_timestamp = fb_library.isc_encode_timestamp self.isc_encode_timestamp.restype = None self.isc_encode_timestamp.argtypes = [c_void_p, POINTER(ISC_TIMESTAMP)] - + #: isc_event_counts(POINTER(RESULT_VECTOR), c_short, POINTER(ISC_UCHAR), POINTER(ISC_UCHAR)) self.isc_event_counts = fb_library.isc_event_counts self.isc_event_counts.restype = None self.isc_event_counts.argtypes = [POINTER(RESULT_VECTOR), c_short, POINTER(ISC_UCHAR), - POINTER(ISC_UCHAR)] - + POINTER(ISC_UCHAR)] + #: isc_expand_dpb(POINTER(STRING), POINTER(c_short)) self.isc_expand_dpb = fb_library.isc_expand_dpb self.isc_expand_dpb.restype = None self.isc_expand_dpb.argtypes = [POINTER(STRING), POINTER(c_short)] - + #: isc_modify_dpb(POINTER(STRING), POINTER(c_short), c_ushort, STRING, c_short) self.isc_modify_dpb = fb_library.isc_modify_dpb self.isc_modify_dpb.restype = c_int self.isc_modify_dpb.argtypes = [POINTER(STRING), POINTER(c_short), c_ushort, STRING, c_short] - + #: isc_free(STRING) self.isc_free = fb_library.isc_free self.isc_free.restype = ISC_LONG self.isc_free.argtypes = [STRING] - + #: isc_get_segment(POINTER(ISC_STATUS), POINTER(isc_blob_handle), POINTER(c_ushort), c_ushort, c_void_p) self.isc_get_segment = fb_library.isc_get_segment self.isc_get_segment.restype = ISC_STATUS self.isc_get_segment.argtypes = [POINTER(ISC_STATUS), POINTER(isc_blob_handle), POINTER(c_ushort), c_ushort, c_void_p] #self.isc_get_segment.argtypes = [POINTER(ISC_STATUS), POINTER(isc_blob_handle), # POINTER(c_ushort), c_ushort, POINTER(c_char)] - + #: isc_get_slice(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(ISC_QUAD), c_short, STRING, c_short, POINTER(ISC_LONG), ISC_LONG, c_void_p, 
POINTER(ISC_LONG)) self.isc_get_slice = fb_library.isc_get_slice self.isc_get_slice.restype = ISC_STATUS self.isc_get_slice.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), @@ -1559,48 +1677,48 @@ c_short, STRING, c_short, POINTER(ISC_LONG), ISC_LONG, c_void_p, POINTER(ISC_LONG)] - + #: isc_interprete(STRING, POINTER(POINTER(ISC_STATUS))) self.isc_interprete = fb_library.isc_interprete self.isc_interprete.restype = ISC_LONG self.isc_interprete.argtypes = [STRING, POINTER(POINTER(ISC_STATUS))] - + #: fb_interpret(STRING, c_uint, POINTER(POINTER(ISC_STATUS))) self.fb_interpret = fb_library.fb_interpret self.fb_interpret.restype = ISC_LONG self.fb_interpret.argtypes = [STRING, c_uint, POINTER(POINTER(ISC_STATUS))] - + #: isc_open_blob(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(isc_blob_handle), POINTER(ISC_QUAD)) self.isc_open_blob = fb_library.isc_open_blob self.isc_open_blob.restype = ISC_STATUS self.isc_open_blob.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(isc_blob_handle), POINTER(ISC_QUAD)] - + #: isc_open_blob2(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(isc_blob_handle), POINTER(ISC_QUAD), ISC_USHORT, STRING) self.isc_open_blob2 = fb_library.isc_open_blob2 self.isc_open_blob2.restype = ISC_STATUS self.isc_open_blob2.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(isc_blob_handle), POINTER(ISC_QUAD), ISC_USHORT, STRING] # POINTER(ISC_UCHAR) - + #: isc_prepare_transaction2(POINTER(ISC_STATUS), POINTER(isc_tr_handle), ISC_USHORT, POINTER(ISC_UCHAR)) self.isc_prepare_transaction2 = fb_library.isc_prepare_transaction2 self.isc_prepare_transaction2.restype = ISC_STATUS self.isc_prepare_transaction2.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), ISC_USHORT, POINTER(ISC_UCHAR)] - + #: isc_print_sqlerror(ISC_SHORT, POINTER(ISC_STATUS)) self.isc_print_sqlerror = fb_library.isc_print_sqlerror self.isc_print_sqlerror.restype = None self.isc_print_sqlerror.argtypes = [ISC_SHORT, POINTER(ISC_STATUS)] - + #: isc_print_status(POINTER(ISC_STATUS)) self.isc_print_status = fb_library.isc_print_status self.isc_print_status.restype = ISC_STATUS self.isc_print_status.argtypes = [POINTER(ISC_STATUS)] - + #: isc_put_segment(POINTER(ISC_STATUS), POINTER(isc_blob_handle), c_ushort, c_void_p) self.isc_put_segment = fb_library.isc_put_segment self.isc_put_segment.restype = ISC_STATUS self.isc_put_segment.argtypes = [POINTER(ISC_STATUS), POINTER(isc_blob_handle), c_ushort, c_void_p] #self.isc_put_segment.argtypes = [POINTER(ISC_STATUS), POINTER(isc_blob_handle), # c_ushort, STRING] - + #: isc_put_slice(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(ISC_QUAD), c_short, STRING, c_short, POINTER(ISC_LONG), ISC_LONG, c_void_p) self.isc_put_slice = fb_library.isc_put_slice self.isc_put_slice.restype = ISC_STATUS self.isc_put_slice.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), @@ -1608,22 +1726,21 @@ c_short, STRING, c_short, POINTER(ISC_LONG), ISC_LONG, c_void_p] - + #: isc_que_events(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(ISC_LONG), c_short, POINTER(ISC_UCHAR), ISC_EVENT_CALLBACK, POINTER(ISC_UCHAR)) self.isc_que_events = fb_library.isc_que_events self.isc_que_events.restype = ISC_STATUS self.isc_que_events.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(ISC_LONG), c_short, POINTER(ISC_UCHAR), ISC_EVENT_CALLBACK, POINTER(ISC_UCHAR)] - + #: 
isc_rollback_retaining(POINTER(ISC_STATUS), POINTER(isc_tr_handle)) self.isc_rollback_retaining = fb_library.isc_rollback_retaining self.isc_rollback_retaining.restype = ISC_STATUS self.isc_rollback_retaining.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle)] - + #: isc_rollback_transaction(POINTER(ISC_STATUS), POINTER(isc_tr_handle)) self.isc_rollback_transaction = fb_library.isc_rollback_transaction self.isc_rollback_transaction.restype = ISC_STATUS - self.isc_rollback_transaction.argtypes = [POINTER(ISC_STATUS), - POINTER(isc_tr_handle)] - + self.isc_rollback_transaction.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle)] + #: isc_start_multiple(POINTER(ISC_STATUS), POINTER(isc_tr_handle), c_short, c_void_p) self.isc_start_multiple = fb_library.isc_start_multiple self.isc_start_multiple.restype = ISC_STATUS self.isc_start_multiple.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), @@ -1634,175 +1751,172 @@ POINTER(isc_tr_handle), c_short, POINTER(isc_db_handle), c_short, STRING) - self.isc_start_transaction = P_isc_start_transaction(('isc_start_transaction', - fb_library)) + self.isc_start_transaction = P_isc_start_transaction(('isc_start_transaction', fb_library)) else: + #: isc_start_transaction(POINTER(ISC_STATUS), POINTER(isc_tr_handle), c_short, POINTER(isc_db_handle), c_short, STRING) self.isc_start_transaction = fb_library.isc_start_transaction self.isc_start_transaction.restype = ISC_STATUS self.isc_start_transaction.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), c_short, POINTER(isc_db_handle), c_short, STRING] - + #: isc_sqlcode(POINTER(ISC_STATUS)) self.isc_sqlcode = fb_library.isc_sqlcode self.isc_sqlcode.restype = ISC_LONG self.isc_sqlcode.argtypes = [POINTER(ISC_STATUS)] - + #: isc_sql_interprete(c_short, STRING, c_short) self.isc_sql_interprete = fb_library.isc_sql_interprete self.isc_sql_interprete.restype = None self.isc_sql_interprete.argtypes = [c_short, STRING, c_short] - + #: isc_transaction_info(POINTER(ISC_STATUS), POINTER(isc_tr_handle), c_short, STRING, c_short, STRING) self.isc_transaction_info = fb_library.isc_transaction_info self.isc_transaction_info.restype = ISC_STATUS self.isc_transaction_info.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), c_short, STRING, c_short, STRING] - + #: isc_transact_request(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_ushort, STRING, c_ushort, STRING, c_ushort, STRING) self.isc_transact_request = fb_library.isc_transact_request self.isc_transact_request.restype = ISC_STATUS self.isc_transact_request.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_ushort, STRING, c_ushort, STRING, c_ushort, STRING] - + #: isc_vax_integer(STRING, c_short) self.isc_vax_integer = fb_library.isc_vax_integer self.isc_vax_integer.restype = ISC_LONG self.isc_vax_integer.argtypes = [STRING, c_short] - + #: isc_portable_integer(POINTER(ISC_UCHAR), c_short) self.isc_portable_integer = fb_library.isc_portable_integer self.isc_portable_integer.restype = ISC_INT64 self.isc_portable_integer.argtypes = [POINTER(ISC_UCHAR), c_short] - + #: isc_add_user(POINTER(ISC_STATUS), POINTER(USER_SEC_DATA)) self.isc_add_user = fb_library.isc_add_user self.isc_add_user.restype = ISC_STATUS self.isc_add_user.argtypes = [POINTER(ISC_STATUS), POINTER(USER_SEC_DATA)] - + #: isc_delete_user(POINTER(ISC_STATUS), POINTER(USER_SEC_DATA)) self.isc_delete_user = fb_library.isc_delete_user self.isc_delete_user.restype = ISC_STATUS self.isc_delete_user.argtypes = [POINTER(ISC_STATUS), 
POINTER(USER_SEC_DATA)] - + #: isc_modify_user(POINTER(ISC_STATUS), POINTER(USER_SEC_DATA)) self.isc_modify_user = fb_library.isc_modify_user self.isc_modify_user.restype = ISC_STATUS self.isc_modify_user.argtypes = [POINTER(ISC_STATUS), POINTER(USER_SEC_DATA)] - + #: isc_compile_request(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_req_handle), c_short, STRING) self.isc_compile_request = fb_library.isc_compile_request self.isc_compile_request.restype = ISC_STATUS self.isc_compile_request.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_req_handle), c_short, STRING] - + #: isc_compile_request2(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_req_handle), c_short, STRING) self.isc_compile_request2 = fb_library.isc_compile_request2 self.isc_compile_request2.restype = ISC_STATUS self.isc_compile_request2.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_req_handle), c_short, STRING] - + #: isc_ddl(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_short, STRING) + #: This function always returns error since FB 3.0 self.isc_ddl = fb_library.isc_ddl self.isc_ddl.restype = ISC_STATUS self.isc_ddl.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_short, STRING] - + #: isc_prepare_transaction(POINTER(ISC_STATUS), POINTER(isc_tr_handle)) self.isc_prepare_transaction = fb_library.isc_prepare_transaction self.isc_prepare_transaction.restype = ISC_STATUS - self.isc_prepare_transaction.argtypes = [POINTER(ISC_STATUS), - POINTER(isc_tr_handle)] - + self.isc_prepare_transaction.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle)] + #: isc_receive(POINTER(ISC_STATUS), POINTER(isc_req_handle), c_short, c_short, c_void_p, c_short) self.isc_receive = fb_library.isc_receive self.isc_receive.restype = ISC_STATUS self.isc_receive.argtypes = [POINTER(ISC_STATUS), POINTER(isc_req_handle), c_short, c_short, c_void_p, c_short] - + #: isc_reconnect_transaction(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_short, STRING) self.isc_reconnect_transaction = fb_library.isc_reconnect_transaction self.isc_reconnect_transaction.restype = ISC_STATUS self.isc_reconnect_transaction.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_short, STRING] - + #: isc_release_request(POINTER(ISC_STATUS), POINTER(isc_req_handle)) self.isc_release_request = fb_library.isc_release_request self.isc_release_request.restype = ISC_STATUS self.isc_release_request.argtypes = [POINTER(ISC_STATUS), POINTER(isc_req_handle)] - + #: isc_request_info(POINTER(ISC_STATUS), POINTER(isc_req_handle), c_short, c_short, STRING, c_short, STRING) self.isc_request_info = fb_library.isc_request_info self.isc_request_info.restype = ISC_STATUS self.isc_request_info.argtypes = [POINTER(ISC_STATUS), POINTER(isc_req_handle), c_short, c_short, STRING, c_short, STRING] - + #: isc_seek_blob(POINTER(ISC_STATUS), POINTER(isc_blob_handle), c_short, ISC_LONG, POINTER(ISC_LONG)) self.isc_seek_blob = fb_library.isc_seek_blob self.isc_seek_blob.restype = ISC_STATUS self.isc_seek_blob.argtypes = [POINTER(ISC_STATUS), POINTER(isc_blob_handle), c_short, ISC_LONG, POINTER(ISC_LONG)] - + #: isc_send(POINTER(ISC_STATUS), POINTER(isc_req_handle), c_short, c_short, c_void_p, c_short) self.isc_send = fb_library.isc_send self.isc_send.restype = ISC_STATUS self.isc_send.argtypes = [POINTER(ISC_STATUS), POINTER(isc_req_handle), c_short, c_short, c_void_p, c_short] - + #: isc_start_and_send(POINTER(ISC_STATUS), 
POINTER(isc_req_handle), POINTER(isc_tr_handle), c_short, c_short, c_void_p, c_short) self.isc_start_and_send = fb_library.isc_start_and_send self.isc_start_and_send.restype = ISC_STATUS self.isc_start_and_send.argtypes = [POINTER(ISC_STATUS), POINTER(isc_req_handle), POINTER(isc_tr_handle), c_short, c_short, c_void_p, c_short] - + #: isc_start_request(POINTER(ISC_STATUS), POINTER(isc_req_handle), POINTER(isc_tr_handle), c_short) self.isc_start_request = fb_library.isc_start_request self.isc_start_request.restype = ISC_STATUS self.isc_start_request.argtypes = [POINTER(ISC_STATUS), POINTER(isc_req_handle), POINTER(isc_tr_handle), c_short] - + #: isc_unwind_request(POINTER(ISC_STATUS), POINTER(isc_tr_handle), c_short) self.isc_unwind_request = fb_library.isc_unwind_request self.isc_unwind_request.restype = ISC_STATUS self.isc_unwind_request.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), c_short] - + #: isc_wait_for_event(POINTER(ISC_STATUS), POINTER(isc_db_handle), c_short, POINTER(ISC_UCHAR), POINTER(ISC_UCHAR)) self.isc_wait_for_event = fb_library.isc_wait_for_event self.isc_wait_for_event.restype = ISC_STATUS self.isc_wait_for_event.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), c_short, POINTER(ISC_UCHAR), POINTER(ISC_UCHAR)] - + #: isc_close(POINTER(ISC_STATUS), STRING) self.isc_close = fb_library.isc_close self.isc_close.restype = ISC_STATUS self.isc_close.argtypes = [POINTER(ISC_STATUS), STRING] - + #: isc_declare(POINTER(ISC_STATUS), STRING, STRING) self.isc_declare = fb_library.isc_declare self.isc_declare.restype = ISC_STATUS self.isc_declare.argtypes = [POINTER(ISC_STATUS), STRING, STRING] - + #: isc_describe(POINTER(ISC_STATUS), STRING, POINTER(XSQLDA)) self.isc_describe = fb_library.isc_describe self.isc_describe.restype = ISC_STATUS self.isc_describe.argtypes = [POINTER(ISC_STATUS), STRING, POINTER(XSQLDA)] - + #: isc_describe_bind(POINTER(ISC_STATUS), STRING, POINTER(XSQLDA)) self.isc_describe_bind = fb_library.isc_describe_bind self.isc_describe_bind.restype = ISC_STATUS self.isc_describe_bind.argtypes = [POINTER(ISC_STATUS), STRING, POINTER(XSQLDA)] - + #: isc_execute(POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, POINTER(XSQLDA)) self.isc_execute = fb_library.isc_execute self.isc_execute.restype = ISC_STATUS self.isc_execute.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, POINTER(XSQLDA)] - + #: isc_execute_immediate(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), POINTER(c_short), STRING) self.isc_execute_immediate = fb_library.isc_execute_immediate self.isc_execute_immediate.restype = ISC_STATUS - self.isc_execute_immediate.argtypes = [POINTER(ISC_STATUS), - POINTER(isc_db_handle), - POINTER(isc_tr_handle), - POINTER(c_short), STRING] - + self.isc_execute_immediate.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), + POINTER(isc_tr_handle), POINTER(c_short), STRING] + #: isc_fetch(POINTER(ISC_STATUS), STRING, POINTER(XSQLDA)) self.isc_fetch = fb_library.isc_fetch self.isc_fetch.restype = ISC_STATUS self.isc_fetch.argtypes = [POINTER(ISC_STATUS), STRING, POINTER(XSQLDA)] - + #: isc_open(POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, POINTER(XSQLDA)) self.isc_open = fb_library.isc_open self.isc_open.restype = ISC_STATUS - self.isc_open.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), - STRING, POINTER(XSQLDA)] - + self.isc_open.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, POINTER(XSQLDA)] + #: isc_prepare(POINTER(ISC_STATUS), POINTER(isc_db_handle), 
POINTER(isc_tr_handle), STRING, POINTER(c_short), STRING, POINTER(XSQLDA)) self.isc_prepare = fb_library.isc_prepare self.isc_prepare.restype = ISC_STATUS self.isc_prepare.argtypes = [POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), STRING, POINTER(c_short), STRING, POINTER(XSQLDA)] - + #: isc_dsql_execute_m(POINTER(ISC_STATUS), POINTER(isc_tr_handle), POINTER(isc_stmt_handle), c_ushort, STRING, c_ushort, c_ushort, STRING) self.isc_dsql_execute_m = fb_library.isc_dsql_execute_m self.isc_dsql_execute_m.restype = ISC_STATUS self.isc_dsql_execute_m.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), POINTER(isc_stmt_handle), c_ushort, STRING, c_ushort, c_ushort, STRING] - + #: isc_dsql_execute2_m(POINTER(ISC_STATUS), POINTER(isc_tr_handle), POINTER(isc_stmt_handle), c_ushort, STRING, c_ushort, c_ushort, STRING, c_ushort, STRING, c_ushort, c_ushort, STRING) self.isc_dsql_execute2_m = fb_library.isc_dsql_execute2_m self.isc_dsql_execute2_m.restype = ISC_STATUS self.isc_dsql_execute2_m.argtypes = [POINTER(ISC_STATUS), @@ -1811,7 +1925,7 @@ STRING, c_ushort, c_ushort, STRING, c_ushort, STRING, c_ushort, c_ushort, STRING] - + #: isc_dsql_execute_immediate_m(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_ushort, STRING, c_ushort, c_ushort, STRING, c_ushort, c_ushort, STRING) self.isc_dsql_execute_immediate_m = fb_library.isc_dsql_execute_immediate_m self.isc_dsql_execute_immediate_m.restype = ISC_STATUS self.isc_dsql_execute_immediate_m.argtypes = [POINTER(ISC_STATUS), @@ -1820,7 +1934,7 @@ c_ushort, STRING, c_ushort, c_ushort, STRING, c_ushort, c_ushort, STRING] - + #: isc_dsql_exec_immed3_m(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_ushort, STRING, c_ushort, c_ushort, STRING, c_ushort, c_ushort, STRING, c_ushort, STRING, c_ushort, c_ushort, STRING) self.isc_dsql_exec_immed3_m = fb_library.isc_dsql_exec_immed3_m self.isc_dsql_exec_immed3_m.restype = ISC_STATUS self.isc_dsql_exec_immed3_m.argtypes = [POINTER(ISC_STATUS), @@ -1830,19 +1944,19 @@ STRING, c_ushort, c_ushort, STRING, c_ushort, STRING, c_ushort, c_ushort, STRING] - + #: isc_dsql_fetch_m(POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, STRING, c_ushort, c_ushort, STRING) self.isc_dsql_fetch_m = fb_library.isc_dsql_fetch_m self.isc_dsql_fetch_m.restype = ISC_STATUS self.isc_dsql_fetch_m.argtypes = [POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, STRING, c_ushort, c_ushort, STRING] - + #: isc_dsql_insert_m(POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, STRING, c_ushort, c_ushort, STRING) self.isc_dsql_insert_m = fb_library.isc_dsql_insert_m self.isc_dsql_insert_m.restype = ISC_STATUS self.isc_dsql_insert_m.argtypes = [POINTER(ISC_STATUS), POINTER(isc_stmt_handle), c_ushort, STRING, c_ushort, c_ushort, STRING] - + #: isc_dsql_prepare_m(POINTER(ISC_STATUS), POINTER(isc_tr_handle), POINTER(isc_stmt_handle), c_ushort, STRING, c_ushort, c_ushort, STRING, c_ushort, STRING) self.isc_dsql_prepare_m = fb_library.isc_dsql_prepare_m self.isc_dsql_prepare_m.restype = ISC_STATUS self.isc_dsql_prepare_m.argtypes = [POINTER(ISC_STATUS), @@ -1850,42 +1964,42 @@ POINTER(isc_stmt_handle), c_ushort, STRING, c_ushort, c_ushort, STRING, c_ushort, STRING] - + #: isc_dsql_release(POINTER(ISC_STATUS), STRING) self.isc_dsql_release = fb_library.isc_dsql_release self.isc_dsql_release.restype = ISC_STATUS self.isc_dsql_release.argtypes = [POINTER(ISC_STATUS), STRING] - + #: isc_embed_dsql_close(POINTER(ISC_STATUS), STRING) self.isc_embed_dsql_close = 
fb_library.isc_embed_dsql_close self.isc_embed_dsql_close.restype = ISC_STATUS self.isc_embed_dsql_close.argtypes = [POINTER(ISC_STATUS), STRING] - + #: isc_embed_dsql_declare(POINTER(ISC_STATUS), STRING, STRING) self.isc_embed_dsql_declare = fb_library.isc_embed_dsql_declare self.isc_embed_dsql_declare.restype = ISC_STATUS self.isc_embed_dsql_declare.argtypes = [POINTER(ISC_STATUS), STRING, STRING] - + #: isc_embed_dsql_describe(POINTER(ISC_STATUS), STRING, c_ushort, POINTER(XSQLDA)) self.isc_embed_dsql_describe = fb_library.isc_embed_dsql_describe self.isc_embed_dsql_describe.restype = ISC_STATUS self.isc_embed_dsql_describe.argtypes = [POINTER(ISC_STATUS), STRING, c_ushort, POINTER(XSQLDA)] - + #: isc_embed_dsql_describe_bind(POINTER(ISC_STATUS), STRING, c_ushort, POINTER(XSQLDA)) self.isc_embed_dsql_describe_bind = fb_library.isc_embed_dsql_describe_bind self.isc_embed_dsql_describe_bind.restype = ISC_STATUS self.isc_embed_dsql_describe_bind.argtypes = [POINTER(ISC_STATUS), STRING, c_ushort, POINTER(XSQLDA)] - + #: isc_embed_dsql_execute(POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, c_ushort, POINTER(XSQLDA)) self.isc_embed_dsql_execute = fb_library.isc_embed_dsql_execute self.isc_embed_dsql_execute.restype = ISC_STATUS self.isc_embed_dsql_execute.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, c_ushort, POINTER(XSQLDA)] - + #: isc_embed_dsql_execute2(POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, c_ushort, POINTER(XSQLDA), POINTER(XSQLDA)) self.isc_embed_dsql_execute2 = fb_library.isc_embed_dsql_execute2 self.isc_embed_dsql_execute2.restype = ISC_STATUS self.isc_embed_dsql_execute2.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, c_ushort, POINTER(XSQLDA), POINTER(XSQLDA)] - + #: isc_embed_dsql_execute_immed(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), c_ushort, STRING, c_ushort, POINTER(XSQLDA)) self.isc_embed_dsql_execute_immed = fb_library.isc_embed_dsql_execute_immed self.isc_embed_dsql_execute_immed.restype = ISC_STATUS self.isc_embed_dsql_execute_immed.argtypes = [POINTER(ISC_STATUS), @@ -1893,35 +2007,35 @@ POINTER(isc_tr_handle), c_ushort, STRING, c_ushort, POINTER(XSQLDA)] - + #: isc_embed_dsql_fetch(POINTER(ISC_STATUS), STRING, c_ushort, POINTER(XSQLDA)) self.isc_embed_dsql_fetch = fb_library.isc_embed_dsql_fetch self.isc_embed_dsql_fetch.restype = ISC_STATUS self.isc_embed_dsql_fetch.argtypes = [POINTER(ISC_STATUS), STRING, c_ushort, POINTER(XSQLDA)] - + #: isc_embed_dsql_fetch_a(POINTER(ISC_STATUS), POINTER(c_int), STRING, ISC_USHORT, POINTER(XSQLDA)) self.isc_embed_dsql_fetch_a = fb_library.isc_embed_dsql_fetch_a self.isc_embed_dsql_fetch_a.restype = ISC_STATUS self.isc_embed_dsql_fetch_a.argtypes = [POINTER(ISC_STATUS), POINTER(c_int), STRING, ISC_USHORT, POINTER(XSQLDA)] - + #: isc_embed_dsql_open(POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, c_ushort, POINTER(XSQLDA)) self.isc_embed_dsql_open = fb_library.isc_embed_dsql_open self.isc_embed_dsql_open.restype = ISC_STATUS self.isc_embed_dsql_open.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, c_ushort, POINTER(XSQLDA)] - + #: isc_embed_dsql_open2(POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, c_ushort, POINTER(XSQLDA), POINTER(XSQLDA)) self.isc_embed_dsql_open2 = fb_library.isc_embed_dsql_open2 self.isc_embed_dsql_open2.restype = ISC_STATUS self.isc_embed_dsql_open2.argtypes = [POINTER(ISC_STATUS), POINTER(isc_tr_handle), STRING, c_ushort, POINTER(XSQLDA), POINTER(XSQLDA)] - + #: isc_embed_dsql_insert(POINTER(ISC_STATUS), 
STRING, c_ushort, POINTER(XSQLDA)) self.isc_embed_dsql_insert = fb_library.isc_embed_dsql_insert self.isc_embed_dsql_insert.restype = ISC_STATUS self.isc_embed_dsql_insert.argtypes = [POINTER(ISC_STATUS), STRING, c_ushort, POINTER(XSQLDA)] - + #: isc_embed_dsql_prepare(POINTER(ISC_STATUS), POINTER(isc_db_handle), POINTER(isc_tr_handle), STRING, c_ushort, STRING, c_ushort, POINTER(XSQLDA)) self.isc_embed_dsql_prepare = fb_library.isc_embed_dsql_prepare self.isc_embed_dsql_prepare.restype = ISC_STATUS self.isc_embed_dsql_prepare.argtypes = [POINTER(ISC_STATUS), @@ -1929,106 +2043,92 @@ POINTER(isc_tr_handle), STRING, c_ushort, STRING, c_ushort, POINTER(XSQLDA)] - + #: isc_embed_dsql_release(POINTER(ISC_STATUS), STRING) self.isc_embed_dsql_release = fb_library.isc_embed_dsql_release self.isc_embed_dsql_release.restype = ISC_STATUS self.isc_embed_dsql_release.argtypes = [POINTER(ISC_STATUS), STRING] - + #: BLOB_open(isc_blob_handle, STRING, c_int) self.BLOB_open = fb_library.BLOB_open self.BLOB_open.restype = POINTER(BSTREAM) self.BLOB_open.argtypes = [isc_blob_handle, STRING, c_int] - + #: BLOB_put(ISC_SCHAR, POINTER(BSTREAM)) self.BLOB_put = fb_library.BLOB_put self.BLOB_put.restype = c_int self.BLOB_put.argtypes = [ISC_SCHAR, POINTER(BSTREAM)] - + #: BLOB_close(POINTER(BSTREAM)) self.BLOB_close = fb_library.BLOB_close self.BLOB_close.restype = c_int self.BLOB_close.argtypes = [POINTER(BSTREAM)] - + #: BLOB_get(POINTER(BSTREAM)) self.BLOB_get = fb_library.BLOB_get self.BLOB_get.restype = c_int self.BLOB_get.argtypes = [POINTER(BSTREAM)] - + #: BLOB_display(POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING) self.BLOB_display = fb_library.BLOB_display self.BLOB_display.restype = c_int - self.BLOB_display.argtypes = [POINTER(ISC_QUAD), isc_db_handle, - isc_tr_handle, STRING] - + self.BLOB_display.argtypes = [POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING] + #: BLOB_dump(POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING) self.BLOB_dump = fb_library.BLOB_dump self.BLOB_dump.restype = c_int - self.BLOB_dump.argtypes = [POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, - STRING] - + self.BLOB_dump.argtypes = [POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING] + #: BLOB_edit(POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING) self.BLOB_edit = fb_library.BLOB_edit self.BLOB_edit.restype = c_int - self.BLOB_edit.argtypes = [POINTER(ISC_QUAD), isc_db_handle, - isc_tr_handle, STRING] - + self.BLOB_edit.argtypes = [POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING] + #: BLOB_load(POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING) self.BLOB_load = fb_library.BLOB_load self.BLOB_load.restype = c_int - self.BLOB_load.argtypes = [POINTER(ISC_QUAD), isc_db_handle, - isc_tr_handle, STRING] - + self.BLOB_load.argtypes = [POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING] + #: BLOB_text_dump(POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING) self.BLOB_text_dump = fb_library.BLOB_text_dump self.BLOB_text_dump.restype = c_int - self.BLOB_text_dump.argtypes = [POINTER(ISC_QUAD), isc_db_handle, - isc_tr_handle, STRING] - + self.BLOB_text_dump.argtypes = [POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING] + #: BLOB_text_load(POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING) self.BLOB_text_load = fb_library.BLOB_text_load self.BLOB_text_load.restype = c_int - self.BLOB_text_load.argtypes = [POINTER(ISC_QUAD), isc_db_handle, - isc_tr_handle, STRING] - + self.BLOB_text_load.argtypes = [POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING] + #: 
Bopen(POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING) self.Bopen = fb_library.Bopen self.Bopen.restype = POINTER(BSTREAM) - self.Bopen.argtypes = [POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, - STRING] - + self.Bopen.argtypes = [POINTER(ISC_QUAD), isc_db_handle, isc_tr_handle, STRING] + #: isc_ftof(STRING, c_ushort, STRING, c_ushort) self.isc_ftof = fb_library.isc_ftof self.isc_ftof.restype = ISC_LONG self.isc_ftof.argtypes = [STRING, c_ushort, STRING, c_ushort] - + #: isc_print_blr(STRING, ISC_PRINT_CALLBACK, c_void_p, c_short) self.isc_print_blr = fb_library.isc_print_blr self.isc_print_blr.restype = ISC_STATUS self.isc_print_blr.argtypes = [STRING, ISC_PRINT_CALLBACK, c_void_p, c_short] - + #: isc_set_debug(c_int) self.isc_set_debug = fb_library.isc_set_debug self.isc_set_debug.restype = None self.isc_set_debug.argtypes = [c_int] - + #: isc_qtoq(POINTER(ISC_QUAD), POINTER(ISC_QUAD)) self.isc_qtoq = fb_library.isc_qtoq self.isc_qtoq.restype = None self.isc_qtoq.argtypes = [POINTER(ISC_QUAD), POINTER(ISC_QUAD)] - + #: isc_vtof(STRING, STRING, c_ushort) self.isc_vtof = fb_library.isc_vtof self.isc_vtof.restype = None self.isc_vtof.argtypes = [STRING, STRING, c_ushort] - + #: isc_vtov(STRING, STRING, c_short) self.isc_vtov = fb_library.isc_vtov self.isc_vtov.restype = None self.isc_vtov.argtypes = [STRING, STRING, c_short] - + #: isc_version(POINTER(isc_db_handle), ISC_VERSION_CALLBACK, c_void_p) self.isc_version = fb_library.isc_version self.isc_version.restype = c_int - self.isc_version.argtypes = [POINTER(isc_db_handle), - ISC_VERSION_CALLBACK, c_void_p] - - # deprecated - #self.isc_reset_fpe = fb_library.isc_reset_fpe - #self.isc_reset_fpe.restype = ISC_LONG - #self.isc_reset_fpe.argtypes = [ISC_USHORT] - + self.isc_version.argtypes = [POINTER(isc_db_handle), ISC_VERSION_CALLBACK, c_void_p] + #: isc_service_attach(POINTER(ISC_STATUS), c_ushort, STRING, POINTER(isc_svc_handle), c_ushort, STRING) self.isc_service_attach = fb_library.isc_service_attach self.isc_service_attach.restype = ISC_STATUS self.isc_service_attach.argtypes = [POINTER(ISC_STATUS), c_ushort, STRING, POINTER(isc_svc_handle), c_ushort, STRING] - + #: isc_service_detach(POINTER(ISC_STATUS), POINTER(isc_svc_handle)) self.isc_service_detach = fb_library.isc_service_detach self.isc_service_detach.restype = ISC_STATUS - self.isc_service_detach.argtypes = [POINTER(ISC_STATUS), - POINTER(isc_svc_handle)] - + self.isc_service_detach.argtypes = [POINTER(ISC_STATUS), POINTER(isc_svc_handle)] + #: isc_service_query(POINTER(ISC_STATUS), POINTER(isc_svc_handle), POINTER(isc_resv_handle), c_ushort, STRING, c_ushort, STRING, c_ushort, STRING) self.isc_service_query = fb_library.isc_service_query self.isc_service_query.restype = ISC_STATUS self.isc_service_query.argtypes = [POINTER(ISC_STATUS), @@ -2036,22 +2136,22 @@ POINTER(isc_resv_handle), c_ushort, STRING, c_ushort, STRING, c_ushort, STRING] - + #: isc_service_start(POINTER(ISC_STATUS), POINTER(isc_svc_handle), POINTER(isc_resv_handle), c_ushort, STRING) self.isc_service_start = fb_library.isc_service_start self.isc_service_start.restype = ISC_STATUS self.isc_service_start.argtypes = [POINTER(ISC_STATUS), POINTER(isc_svc_handle), POINTER(isc_resv_handle), c_ushort, STRING] - + #: isc_get_client_version(STRING) self.isc_get_client_version = fb_library.isc_get_client_version self.isc_get_client_version.restype = None self.isc_get_client_version.argtypes = [STRING] - + #: isc_get_client_major_version() self.isc_get_client_major_version = 
fb_library.isc_get_client_major_version self.isc_get_client_major_version.restype = c_int self.isc_get_client_major_version.argtypes = [] - + #: isc_get_client_minor_version() self.isc_get_client_minor_version = fb_library.isc_get_client_minor_version self.isc_get_client_minor_version.restype = c_int self.isc_get_client_minor_version.argtypes = [] @@ -2080,19 +2180,21 @@ #self.wcstoumax.restype = uintmax_t #self.wcstoumax.argtypes = [WSTRING, POINTER(WSTRING), c_int] - self.P_isc_event_block = CFUNCTYPE(ISC_LONG,POINTER(POINTER(ISC_UCHAR)), - POINTER(POINTER(ISC_UCHAR)), ISC_USHORT) - self.C_isc_event_block = self.P_isc_event_block(('isc_event_block',fb_library)) + self.P_isc_event_block = CFUNCTYPE(ISC_LONG, POINTER(POINTER(ISC_UCHAR)), + POINTER(POINTER(ISC_UCHAR)), ISC_USHORT) + #: C_isc_event_block(ISC_LONG, POINTER(POINTER(ISC_UCHAR)), POINTER(POINTER(ISC_UCHAR)), ISC_USHORT) + self.C_isc_event_block = self.P_isc_event_block(('isc_event_block', fb_library)) self.P_isc_event_block_args = self.C_isc_event_block.argtypes - def isc_event_block(self,event_buffer,result_buffer,*args): + def isc_event_block(self, event_buffer, result_buffer, *args): + "Injects variable number of parameters into C_isc_event_block call" if len(args) > 15: raise Exception("isc_event_block takes no more than 15 event names") newargs = list(self.P_isc_event_block_args) for x in args: newargs.append(STRING) self.C_isc_event_block.argtypes = newargs - result = self.C_isc_event_block(event_buffer,result_buffer,len(args),*args) + result = self.C_isc_event_block(event_buffer, result_buffer, len(args), *args) return result diff -Nru fdb-1.6.1+dfsg1/fdb/__init__.py fdb-2.0.0/fdb/__init__.py --- fdb-1.6.1+dfsg1/fdb/__init__.py 2016-03-26 14:01:14.000000000 +0000 +++ fdb-2.0.0/fdb/__init__.py 2018-04-26 14:39:03.000000000 +0000 @@ -12,7 +12,7 @@ # # The Original Code was created by Pavel Cisar # -# Copyright (c) 2011 Pavel Cisar +# Copyright (c) Pavel Cisar # and all contributors signed below. # # All Rights Reserved. 
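The isc_event_block wrapper closing the client-library bindings above illustrates a general ctypes pattern: a variadic C entry point cannot be described by a fixed argtypes list, so the wrapper rebuilds the prototype on every call, appending one STRING entry per event name before invoking the CFUNCTYPE object. A minimal, self-contained sketch of the same pattern against libc's variadic printf rather than fbclient (the library lookup and the call_printf helper are illustrative, not part of fdb):

    import ctypes
    import ctypes.util

    # Assumes a Unix-like platform where find_library can locate the C runtime.
    libc = ctypes.CDLL(ctypes.util.find_library("c"))
    printf = libc.printf
    printf.restype = ctypes.c_int

    def call_printf(fmt, *strings):
        # Rebuild argtypes for this particular call, just as isc_event_block
        # extends its fixed parameter prefix with one STRING per event name.
        printf.argtypes = [ctypes.c_char_p] * (1 + len(strings))
        return printf(fmt, *strings)

    call_printf(b"%s and %s\n", b"event_a", b"event_b")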
@@ -23,127 +23,102 @@ from fdb.fbcore import * from fdb.fbcore import __version__ from fdb import services +from fdb import blr +from fdb import trace +from fdb import gstat __all__ = (# Common with KInterbasDB - 'BINARY', 'Binary', 'BlobReader', 'Connection', 'ConnectionGroup', - 'Cursor', 'DATETIME', 'DBAPITypeObject', 'DESCRIPTION_DISPLAY_SIZE', - 'DESCRIPTION_INTERNAL_SIZE', 'DESCRIPTION_NAME', 'DESCRIPTION_NULL_OK', - 'DESCRIPTION_PRECISION', 'DESCRIPTION_SCALE', 'DESCRIPTION_TYPE_CODE', - 'DIST_TRANS_MAX_DATABASES', 'DataError', 'DatabaseError', 'Date', - 'DateFromTicks', 'Error', 'EventConduit', 'IntegrityError', - 'InterfaceError', 'InternalError', 'NUMBER', 'NotSupportedError', - 'OperationalError', 'PreparedStatement', 'ProgrammingError', 'ROWID', - 'STRING', 'TPB', 'TableReservation', 'Time', 'TimeFromTicks', - 'TimestampFromTicks', 'Transaction', 'TransactionConflict', 'Warning', - '__version__', 'apilevel', 'connect', 'create_database', - 'frb_info_att_charset', 'isc_dpb_activate_shadow', 'isc_dpb_address_path', - 'isc_dpb_allocation', 'isc_dpb_begin_log', 'isc_dpb_buffer_length', - 'isc_dpb_cache_manager', 'isc_dpb_cdd_pathname', 'isc_dpb_connect_timeout', - 'isc_dpb_damaged', 'isc_dpb_dbkey_scope', 'isc_dpb_debug', - 'isc_dpb_delete_shadow', 'isc_dpb_disable_journal', 'isc_dpb_disable_wal', - 'isc_dpb_drop_walfile', 'isc_dpb_dummy_packet_interval', - 'isc_dpb_enable_journal', 'isc_dpb_encrypt_key', 'isc_dpb_force_write', - 'isc_dpb_garbage_collect', 'isc_dpb_gbak_attach', 'isc_dpb_gfix_attach', - 'isc_dpb_gsec_attach', 'isc_dpb_gstat_attach', 'isc_dpb_interp', - 'isc_dpb_journal', 'isc_dpb_lc_ctype', 'isc_dpb_lc_messages', - 'isc_dpb_license', 'isc_dpb_no_garbage_collect', 'isc_dpb_no_reserve', - 'isc_dpb_num_buffers', 'isc_dpb_number_of_users', 'isc_dpb_old_dump_id', - 'isc_dpb_old_file', 'isc_dpb_old_file_size', 'isc_dpb_old_num_files', - 'isc_dpb_old_start_file', 'isc_dpb_old_start_page', 'isc_dpb_old_start_seqno', - 'isc_dpb_online', 'isc_dpb_online_dump', 'isc_dpb_overwrite', - 'isc_dpb_page_size', 'isc_dpb_password', 'isc_dpb_password_enc', - 'isc_dpb_quit_log', 'isc_dpb_reserved', 'isc_dpb_sec_attach', - 'isc_dpb_set_db_charset', 'isc_dpb_set_db_readonly', - 'isc_dpb_set_db_sql_dialect', 'isc_dpb_set_page_buffers', - 'isc_dpb_shutdown', 'isc_dpb_shutdown_delay', 'isc_dpb_sql_dialect', - 'isc_dpb_sql_role_name', 'isc_dpb_sweep', 'isc_dpb_sweep_interval', - 'isc_dpb_sys_user_name', 'isc_dpb_sys_user_name_enc', 'isc_dpb_trace', - 'isc_dpb_user_name', 'isc_dpb_verify', 'isc_dpb_version1', - 'isc_dpb_wal_backup_dir', 'isc_dpb_wal_bufsize', 'isc_dpb_wal_chkptlen', - 'isc_dpb_wal_grp_cmt_wait', 'isc_dpb_wal_numbufs', 'isc_dpb_working_directory', - 'isc_info_active_tran_count', 'isc_info_active_transactions', - 'isc_info_allocation', 'isc_info_attachment_id', 'isc_info_backout_count', - 'isc_info_base_level', 'isc_info_bpage_errors', 'isc_info_creation_date', - 'isc_info_cur_log_part_offset', 'isc_info_cur_logfile_name', - 'isc_info_current_memory', 'isc_info_db_class', 'fb_info_page_contents', - 'isc_info_db_id', 'isc_info_db_provider', 'isc_info_db_read_only', - 'isc_info_db_size_in_pages', 'isc_info_db_sql_dialect', - 'isc_info_delete_count', 'isc_info_dpage_errors', 'isc_info_expunge_count', - 'isc_info_fetches', 'isc_info_firebird_version', 'isc_info_forced_writes', - 'isc_info_implementation', 'isc_info_insert_count', 'isc_info_ipage_errors', - 'isc_info_isc_version', 'isc_info_license', 'isc_info_limbo', - 'isc_info_logfile', 'isc_info_marks', 'isc_info_max_memory', - 
'isc_info_next_transaction', 'isc_info_no_reserve', 'isc_info_num_buffers', - 'isc_info_num_wal_buffers', 'isc_info_ods_minor_version', - 'isc_info_ods_version', 'isc_info_oldest_active', 'isc_info_oldest_snapshot', - 'isc_info_oldest_transaction', 'isc_info_page_errors', 'isc_info_page_size', - 'isc_info_ppage_errors', 'isc_info_purge_count', 'isc_info_read_idx_count', - 'isc_info_read_seq_count', 'isc_info_reads', 'isc_info_record_errors', - 'isc_info_set_page_buffers', 'isc_info_sql_stmt_commit', - 'isc_info_sql_stmt_ddl', 'isc_info_sql_stmt_delete', - 'isc_info_sql_stmt_exec_procedure', 'isc_info_sql_stmt_get_segment', - 'isc_info_sql_stmt_insert', 'isc_info_sql_stmt_put_segment', - 'isc_info_sql_stmt_rollback', 'isc_info_sql_stmt_savepoint', - 'isc_info_sql_stmt_select', 'isc_info_sql_stmt_select_for_upd', - 'isc_info_sql_stmt_set_generator', 'isc_info_sql_stmt_start_trans', - 'isc_info_sql_stmt_update', 'isc_info_sweep_interval', 'isc_info_tpage_errors', - 'isc_info_tra_access', 'isc_info_tra_concurrency', 'isc_info_tra_consistency', - 'isc_info_tra_id', 'isc_info_tra_isolation', 'isc_info_tra_lock_timeout', - 'isc_info_tra_no_rec_version', 'isc_info_tra_oldest_active', - 'isc_info_tra_oldest_interesting', 'isc_info_tra_oldest_snapshot', - 'isc_info_tra_read_committed', 'isc_info_tra_readonly','fb_info_tra_dbpath', - 'isc_info_tra_readwrite', 'isc_info_tra_rec_version', 'isc_info_update_count', - 'isc_info_user_names', 'isc_info_version', 'isc_info_wal_avg_grpc_size', - 'isc_info_wal_avg_io_size', 'isc_info_wal_buffer_size', - 'isc_info_wal_ckpt_length', 'isc_info_wal_cur_ckpt_interval', - 'isc_info_wal_grpc_wait_usecs', 'isc_info_wal_num_commits', - 'isc_info_wal_num_io', 'isc_info_wal_prv_ckpt_fname', - 'isc_info_wal_prv_ckpt_poffset', 'isc_info_wal_recv_ckpt_fname', - 'isc_info_wal_recv_ckpt_poffset', 'isc_info_window_turns', - 'isc_info_writes', 'isc_tpb_autocommit', 'isc_tpb_commit_time', - 'isc_tpb_concurrency', 'isc_tpb_consistency', 'isc_tpb_exclusive', - 'isc_tpb_ignore_limbo', 'isc_tpb_lock_read', 'isc_tpb_lock_timeout', - 'isc_tpb_lock_write', 'isc_tpb_no_auto_undo', 'isc_tpb_no_rec_version', - 'isc_tpb_nowait', 'isc_tpb_protected', 'isc_tpb_read', - 'isc_tpb_read_committed', 'isc_tpb_rec_version', 'isc_tpb_restart_requests', - 'isc_tpb_shared', 'isc_tpb_verb_time', 'isc_tpb_version3', 'isc_tpb_wait', - 'isc_tpb_write', 'paramstyle', 'threadsafety', - # New in FDB - 'ISOLATION_LEVEL_READ_COMMITED', 'ISOLATION_LEVEL_READ_COMMITED_LEGACY', - 'ISOLATION_LEVEL_REPEATABLE_READ', 'ISOLATION_LEVEL_SERIALIZABLE', - 'ISOLATION_LEVEL_SNAPSHOT', 'ISOLATION_LEVEL_SNAPSHOT_TABLE_STABILITY', - 'ISOLATION_LEVEL_READ_COMMITED_RO', - 'MAX_BLOB_SEGMENT_SIZE', - 'SQL_ARRAY', 'SQL_BLOB', 'SQL_DOUBLE', 'SQL_D_FLOAT', 'SQL_FLOAT', - 'SQL_INT64', 'SQL_LONG', 'SQL_QUAD', 'SQL_SHORT', 'SQL_TEXT', - 'SQL_TIMESTAMP', 'SQL_TYPE_DATE', 'SQL_TYPE_TIME', 'SQL_VARYING', - 'SUBTYPE_DECIMAL', 'SUBTYPE_NUMERIC', 'SQL_BOOLEAN', 'build_dpb', - 'charset_map', 'load_api', - 'isc_info_end', 'isc_sqlcode', 'bs', 'ConnectionWithSchema', - 'ODS_FB_20','ODS_FB_21','ODS_FB_25','ODS_FB_30', - - ) - -# Present in KDB but missing in FDB - -# 'BASELINE_TYPE_TRANSLATION_FACILITIES', -# 'CT_COMMIT', 'CT_DEFAULT', 'CT_NONTRANSPARENT', 'CT_ROLLBACK', 'CT_VETO', -# 'ConduitWasClosed', 'ConnectionTimedOut', 'DEFAULT_CONCURRENCY_LEVEL', -# 'ExternallyVisibleMainTransaction', 'FB_API_VER', 'Timestamp', -# '_ALL_EXCEPTION_CLASSES', '_CONNECTION_TIMEOUT_SUPPORTED', -# '_Cursor_execute_exception_type_filter', 
'_DATABASE_INFO_CODES_WITH_COUNT_RESULTS', -# '_DATABASE_INFO_CODES_WITH_INT_RESULT', '_DATABASE_INFO_CODES_WITH_TIMESTAMP_RESULT', -# '_DATABASE_INFO__KNOWN_LOW_LEVEL_EXCEPTIONS', '_DPBBuilder', -# '_DPB_CODES_WITH_STRING_VALUE', '_DPB_CODE_WITH_INT_VALUE', -# '_EVENT_HANDLING_SUPPORTED', '_FS_ENCODING', '_MINIMAL_TYPE_TRANS_TYPES', -# '_NORMAL_TYPE_TRANS_IN', '_NORMAL_TYPE_TRANS_OUT', '_OUT_TRANS_FUNC_SAMPLE_ARGS', -# '_TRANSACTION_CONFLICT_RAW_CODES', -# '__timestamp__', '_addDatabaseInfoCodeIfPresent', '_connection_timeout', -# '_ensureInitialized', '_extractDatabaseInfoCounts', -# '_guessTextualBlobEncodingWhenUsingFB20AndEarlier', '_k', '_kinterbasdb', -# '_look_up_array_descriptor', '_look_up_array_subtype', -# '_make_output_translator_return_type_dict_from_trans_dict', -# '_normalizeDatabaseIdentifier', '_request_buffer_builder', '_trans_info', -# '_trans_require_dict', '_validateTPB', 'default_tpb', 'get_concurrency_level', -# 'init', 'initialized', 'k_exceptions', 'portable_int', 'raw_byte_to_int', -# 'raw_timestamp_to_tuple'] \ No newline at end of file + 'BINARY', 'Binary', 'BlobReader', 'Connection', 'ConnectionGroup', + 'Cursor', 'DATETIME', 'DBAPITypeObject', 'DESCRIPTION_DISPLAY_SIZE', + 'DESCRIPTION_INTERNAL_SIZE', 'DESCRIPTION_NAME', 'DESCRIPTION_NULL_OK', + 'DESCRIPTION_PRECISION', 'DESCRIPTION_SCALE', 'DESCRIPTION_TYPE_CODE', + 'DIST_TRANS_MAX_DATABASES', 'DataError', 'DatabaseError', 'Date', + 'DateFromTicks', 'Error', 'EventConduit', 'IntegrityError', + 'InterfaceError', 'InternalError', 'NUMBER', 'NotSupportedError', + 'OperationalError', 'PreparedStatement', 'ProgrammingError', 'ROWID', + 'STRING', 'TPB', 'TableReservation', 'ParameterBuffer', 'Time', 'TimeFromTicks', + 'TimestampFromTicks', 'Transaction', 'TransactionConflict', + '__version__', 'apilevel', 'connect', 'create_database', + 'frb_info_att_charset', 'isc_dpb_activate_shadow', 'isc_dpb_address_path', + 'isc_dpb_allocation', 'isc_dpb_begin_log', 'isc_dpb_buffer_length', + 'isc_dpb_cache_manager', 'isc_dpb_cdd_pathname', 'isc_dpb_connect_timeout', + 'isc_dpb_damaged', 'isc_dpb_dbkey_scope', 'isc_dpb_debug', + 'isc_dpb_delete_shadow', + 'isc_dpb_dummy_packet_interval', + 'isc_dpb_encrypt_key', 'isc_dpb_force_write', + 'isc_dpb_garbage_collect', 'isc_dpb_gbak_attach', 'isc_dpb_gfix_attach', + 'isc_dpb_gsec_attach', 'isc_dpb_gstat_attach', 'isc_dpb_interp', + 'isc_dpb_lc_ctype', 'isc_dpb_lc_messages', + 'isc_dpb_no_garbage_collect', 'isc_dpb_no_reserve', + 'isc_dpb_num_buffers', 'isc_dpb_number_of_users', 'isc_dpb_old_dump_id', + 'isc_dpb_old_file', 'isc_dpb_old_file_size', 'isc_dpb_old_num_files', + 'isc_dpb_old_start_file', 'isc_dpb_old_start_page', 'isc_dpb_old_start_seqno', + 'isc_dpb_online', 'isc_dpb_online_dump', 'isc_dpb_overwrite', + 'isc_dpb_page_size', 'isc_dpb_password', 'isc_dpb_password_enc', + 'isc_dpb_quit_log', 'isc_dpb_reserved', 'isc_dpb_sec_attach', + 'isc_dpb_set_db_charset', 'isc_dpb_set_db_readonly', + 'isc_dpb_set_db_sql_dialect', 'isc_dpb_set_page_buffers', + 'isc_dpb_shutdown', 'isc_dpb_shutdown_delay', 'isc_dpb_sql_dialect', + 'isc_dpb_sql_role_name', 'isc_dpb_sweep', 'isc_dpb_sweep_interval', + 'isc_dpb_sys_user_name', 'isc_dpb_sys_user_name_enc', 'isc_dpb_trace', + 'isc_dpb_user_name', 'isc_dpb_verify', 'isc_dpb_version1', + 'isc_dpb_working_directory', 'isc_info_active_tran_count', 'isc_info_active_transactions', + 'isc_info_allocation', 'isc_info_attachment_id', 'isc_info_backout_count', + 'isc_info_base_level', 'isc_info_bpage_errors', 'isc_info_creation_date', + 
'isc_info_cur_log_part_offset', 'isc_info_cur_logfile_name', + 'isc_info_current_memory', 'isc_info_db_class', 'fb_info_page_contents', + 'isc_info_db_id', 'isc_info_db_provider', 'isc_info_db_read_only', + 'isc_info_db_size_in_pages', 'isc_info_db_sql_dialect', + 'isc_info_delete_count', 'isc_info_dpage_errors', 'isc_info_expunge_count', + 'isc_info_fetches', 'isc_info_firebird_version', 'isc_info_forced_writes', + 'isc_info_implementation', 'isc_info_insert_count', 'isc_info_ipage_errors', + 'isc_info_isc_version', 'isc_info_license', 'isc_info_limbo', + 'isc_info_logfile', 'isc_info_marks', 'isc_info_max_memory', + 'isc_info_next_transaction', 'isc_info_no_reserve', 'isc_info_num_buffers', + 'isc_info_num_wal_buffers', 'isc_info_ods_minor_version', + 'isc_info_ods_version', 'isc_info_oldest_active', 'isc_info_oldest_snapshot', + 'isc_info_oldest_transaction', 'isc_info_page_errors', 'isc_info_page_size', + 'isc_info_ppage_errors', 'isc_info_purge_count', 'isc_info_read_idx_count', + 'isc_info_read_seq_count', 'isc_info_reads', 'isc_info_record_errors', + 'isc_info_set_page_buffers', 'isc_info_sql_stmt_commit', + 'isc_info_sql_stmt_ddl', 'isc_info_sql_stmt_delete', + 'isc_info_sql_stmt_exec_procedure', 'isc_info_sql_stmt_get_segment', + 'isc_info_sql_stmt_insert', 'isc_info_sql_stmt_put_segment', + 'isc_info_sql_stmt_rollback', 'isc_info_sql_stmt_savepoint', + 'isc_info_sql_stmt_select', 'isc_info_sql_stmt_select_for_upd', + 'isc_info_sql_stmt_set_generator', 'isc_info_sql_stmt_start_trans', + 'isc_info_sql_stmt_update', 'isc_info_sweep_interval', 'isc_info_tpage_errors', + 'isc_info_tra_access', 'isc_info_tra_concurrency', 'isc_info_tra_consistency', + 'isc_info_tra_id', 'isc_info_tra_isolation', 'isc_info_tra_lock_timeout', + 'isc_info_tra_no_rec_version', 'isc_info_tra_oldest_active', + 'isc_info_tra_oldest_interesting', 'isc_info_tra_oldest_snapshot', + 'isc_info_tra_read_committed', 'isc_info_tra_readonly', 'fb_info_tra_dbpath', + 'isc_info_tra_readwrite', 'isc_info_tra_rec_version', 'isc_info_update_count', + 'isc_info_user_names', 'isc_info_version', 'isc_info_wal_avg_grpc_size', + 'isc_info_wal_avg_io_size', 'isc_info_wal_buffer_size', + 'isc_info_wal_ckpt_length', 'isc_info_wal_cur_ckpt_interval', + 'isc_info_wal_grpc_wait_usecs', 'isc_info_wal_num_commits', + 'isc_info_wal_num_io', 'isc_info_wal_prv_ckpt_fname', + 'isc_info_wal_prv_ckpt_poffset', 'isc_info_wal_recv_ckpt_fname', + 'isc_info_wal_recv_ckpt_poffset', 'isc_info_window_turns', + 'isc_info_writes', 'isc_tpb_autocommit', 'isc_tpb_commit_time', + 'isc_tpb_concurrency', 'isc_tpb_consistency', 'isc_tpb_exclusive', + 'isc_tpb_ignore_limbo', 'isc_tpb_lock_read', 'isc_tpb_lock_timeout', + 'isc_tpb_lock_write', 'isc_tpb_no_auto_undo', 'isc_tpb_no_rec_version', + 'isc_tpb_nowait', 'isc_tpb_protected', 'isc_tpb_read', + 'isc_tpb_read_committed', 'isc_tpb_rec_version', 'isc_tpb_restart_requests', + 'isc_tpb_shared', 'isc_tpb_verb_time', 'isc_tpb_version3', 'isc_tpb_wait', + 'isc_tpb_write', 'paramstyle', 'threadsafety', + # New in FDB + 'ISOLATION_LEVEL_READ_COMMITED', 'ISOLATION_LEVEL_READ_COMMITED_LEGACY', + 'ISOLATION_LEVEL_REPEATABLE_READ', 'ISOLATION_LEVEL_SERIALIZABLE', + 'ISOLATION_LEVEL_SNAPSHOT', 'ISOLATION_LEVEL_SNAPSHOT_TABLE_STABILITY', + 'ISOLATION_LEVEL_READ_COMMITED_RO', + 'MAX_BLOB_SEGMENT_SIZE', + 'SQL_ARRAY', 'SQL_BLOB', 'SQL_DOUBLE', 'SQL_D_FLOAT', 'SQL_FLOAT', + 'SQL_INT64', 'SQL_LONG', 'SQL_QUAD', 'SQL_SHORT', 'SQL_TEXT', + 'SQL_TIMESTAMP', 'SQL_TYPE_DATE', 'SQL_TYPE_TIME', 'SQL_VARYING', + 
'SUBTYPE_DECIMAL', 'SUBTYPE_NUMERIC', 'SQL_BOOLEAN', + 'charset_map', 'load_api', + 'isc_info_end', 'bs', 'ConnectionWithSchema', # 'isc_sqlcode', + 'ODS_FB_20', 'ODS_FB_21', 'ODS_FB_25', 'ODS_FB_30') diff -Nru fdb-1.6.1+dfsg1/fdb/log.py fdb-2.0.0/fdb/log.py --- fdb-1.6.1+dfsg1/fdb/log.py 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/fdb/log.py 2018-04-26 14:39:03.000000000 +0000 @@ -0,0 +1,83 @@ +#coding:utf-8 +# +# PROGRAM/MODULE: fdb +# FILE: log.py +# DESCRIPTION: Python driver for Firebird - Firebird server log parser +# CREATED: 11.4.2018 +# +# Software distributed under the License is distributed AS IS, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. +# See the License for the specific language governing rights +# and limitations under the License. +# +# The Original Code was created by Pavel Cisar +# +# Copyright (c) Pavel Cisar +# and all contributors signed below. +# +# All Rights Reserved. +# Contributor(s): ______________________________________. +# +# See LICENSE.TXT for details. + +from fdb import ParseError +from datetime import datetime +from collections import namedtuple +from locale import LC_ALL, getlocale, setlocale, resetlocale +import sys + +LogEntry = namedtuple('LogEntry', 'source_id,timestamp,message') + +def parse(lines): + """Parse Firebird server log and yield named tuples describing individual log entries/events. + + :param lines: Iterable of lines from Firebird server log. + + :raises `~fdb.ParseError`: When any problem is found in input stream. +""" + line_no = 0 + locale = getlocale(LC_ALL) + if sys.platform == 'win32': + setlocale(LC_ALL, 'English_United States') + else: + setlocale(LC_ALL, 'en_US') + try: + clean = (line.strip() for line in lines) + entry_lines = [] + timestamp = None + source_id = 'UNKNOWN' + for line in clean: + line_no += 1 + if line == '': + continue + items = line.split() + if len(items) > 5: # It's potentially new entry + try: + new_timestamp = datetime.strptime(' '.join(items[len(items)-5:]), + '%a %b %d %H:%M:%S %Y') + except ValueError: + new_timestamp = None + if new_timestamp is not None: + if entry_lines: + yield LogEntry(source_id=source_id, timestamp=timestamp, + message='\n'.join(entry_lines)) + entry_lines = [] + # Init new entry + timestamp = new_timestamp + source_id = ' '.join(items[:len(items)-5]) + else: + entry_lines.append(line) + else: + entry_lines.append(line) + if entry_lines: + yield LogEntry(source_id=source_id, timestamp=timestamp, message='\n'.join(entry_lines)) + except Exception as e: + raise ParseError("Can't parse line %d\n%s" % (line_no, e)) + finally: + if locale[0] is None: + if sys.platform == 'win32': + setlocale(LC_ALL, '') + else: + resetlocale(LC_ALL) + else: + setlocale(LC_ALL, locale) diff -Nru fdb-1.6.1+dfsg1/fdb/monitor.py fdb-2.0.0/fdb/monitor.py --- fdb-1.6.1+dfsg1/fdb/monitor.py 2015-12-29 13:45:45.000000000 +0000 +++ fdb-2.0.0/fdb/monitor.py 2018-04-26 14:39:03.000000000 +0000 @@ -2,7 +2,7 @@ # # PROGRAM: fdb # MODULE: monitor.py -# DESCRIPTION: Database monitoring +# DESCRIPTION: Python driver for Firebird - Database monitoring # CREATED: 10.5.2013 # # Software distributed under the License is distributed AS IS, @@ -12,16 +12,16 @@ # # The Original Code was created by Pavel Cisar # -# Copyright (c) 2013 Pavel Cisar +# Copyright (c) Pavel Cisar # and all contributors signed below. # # All Rights Reserved. # Contributor(s): ______________________________________. +# +# See LICENSE.TXT for details.
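The new fdb.log module added above is self-contained: parse() accepts any iterable of lines, starts a new entry whenever the last five whitespace-separated fields of a line parse as a '%a %b %d %H:%M:%S %Y' timestamp, and folds every other line into the current entry's message. A minimal usage sketch (the log path is illustrative):

    from fdb import log

    # parse() is a generator, so it works on files or any other line iterable.
    with open("/var/log/firebird/firebird.log") as f:
        for entry in log.parse(f):
            print(entry.source_id, entry.timestamp, entry.message.splitlines()[0])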
-import sys -import os import fdb -from fdb.utils import LateBindingProperty +from fdb.utils import LateBindingProperty, ObjectList import weakref # Current shutdown mode @@ -72,6 +72,7 @@ self._con = None self._ic = None self.__internal = False + self.clear() def __del__(self): if not self.closed: self._close() @@ -97,18 +98,17 @@ self.__fail_if_closed() if self._con.ods >= fdb.ODS_FB_21: self._ic.execute("select * from mon$database") - self.__database = DatabaseInfo(self,self._ic.fetchonemap()) - else: - self.__database = [] + self.__database = DatabaseInfo(self, self._ic.fetchonemap()) return self.__database def _get_attachments(self): if self.__attachments is None: self.__fail_if_closed() if self._con.ods >= fdb.ODS_FB_21: self._ic.execute("select * from mon$attachments") - self.__attachments = [AttachmentInfo(self,row) for row in self._ic.itermap()] + self.__attachments = ObjectList((AttachmentInfo(self, row) for row in self._ic.itermap()), AttachmentInfo, 'item.id') else: - self.__attachments = [] + self.__attachments = ObjectList() + self.__attachments.freeze() return self.__attachments def _get_this_attachment(self): return self.get_attachment(self._con.db_info(fdb.isc_info_attachment_id)) @@ -117,27 +117,30 @@ self.__fail_if_closed() if self._con.ods >= fdb.ODS_FB_21: self._ic.execute("select * from mon$transactions") - self.__transactions = [TransactionInfo(self,row) for row in self._ic.itermap()] + self.__transactions = ObjectList((TransactionInfo(self, row) for row in self._ic.itermap()), TransactionInfo, 'item.id') else: - self.__transactions = [] + self.__transactions = ObjectList() + self.__transactions.freeze() return self.__transactions def _get_statements(self): if self.__statements is None: self.__fail_if_closed() if self._con.ods >= fdb.ODS_FB_21: self._ic.execute("select * from mon$statements") - self.__statements = [StatementInfo(self,row) for row in self._ic.itermap()] + self.__statements = ObjectList((StatementInfo(self, row) for row in self._ic.itermap()), StatementInfo, 'item.id') else: - self.__statements = [] + self.__statements = ObjectList() + self.__statements.freeze() return self.__statements def _get_callstack(self): if self.__callstack is None: self.__fail_if_closed() if self._con.ods >= fdb.ODS_FB_21: self._ic.execute("select * from mon$call_stack") - self.__callstack = [CallStackInfo(self,row) for row in self._ic.itermap()] + self.__callstack = ObjectList((CallStackInfo(self, row) for row in self._ic.itermap()), CallStackInfo, 'item.id') else: - self.__callstack = [] + self.__callstack = ObjectList() + self.__callstack.freeze() return self.__callstack def _get_iostats(self): if self.__iostats is None: @@ -174,18 +177,20 @@ FROM MON$RECORD_STATS r join MON$IO_STATS io on r.MON$STAT_ID = io.MON$STAT_ID and r.MON$STAT_GROUP = io.MON$STAT_GROUP""") if self._con.ods >= fdb.ODS_FB_21: - self.__iostats = [IOStatsInfo(self,row) for row in self._ic.itermap()] + self.__iostats = ObjectList((IOStatsInfo(self, row) for row in self._ic.itermap()), IOStatsInfo, 'item.stat_id') else: - self.__iostats = [] + self.__iostats = ObjectList() + self.__iostats.freeze() return self.__iostats def _get_variables(self): if self.__variables is None: self.__fail_if_closed() if self._con.ods >= fdb.ODS_FB_25: self._ic.execute("select * from mon$context_variables") - self.__variables = [ContextVariableInfo(self,row) for row in self._ic.itermap()] + self.__variables = ObjectList((ContextVariableInfo(self, row) for row in self._ic.itermap()), ContextVariableInfo, 'item.stat_id') else: 
- self.__variables = [] + self.__variables = ObjectList() + self.__variables.freeze() return self.__variables def _get_tablestats(self): if self.__tablestats is None: @@ -198,34 +203,26 @@ r.MON$RECORD_CONFLICTS, r.MON$BACKVERSION_READS, r.MON$FRAGMENT_READS, r.MON$RECORD_RPT_READS FROM MON$TABLE_STATS ts join MON$RECORD_STATS r on ts.MON$RECORD_STAT_ID = r.MON$STAT_ID""") - self.__tablestats = [TableStatsInfo(self,row) for row in self._ic.itermap()] + self.__tablestats = ObjectList((TableStatsInfo(self, row) for row in self._ic.itermap()), TableStatsInfo, 'item.stat_id') else: - self.__tablestats = [] + self.__tablestats = ObjectList() + self.__tablestats.freeze() return self.__tablestats #--- Properties #: True if link to :class:`~fdb.Connection` is closed. closed = property(__get_closed) - db = LateBindingProperty(_get_database,None,None, - ":class:`DatabaseInfo` object for attached database.") - attachments = LateBindingProperty(_get_attachments,None,None, - "List of all attachments.\nItems are :class:`AttachmentInfo` objects.") - this_attachment = LateBindingProperty(_get_this_attachment,None,None, - ":class:`AttachmentInfo` object for current connection.") - transactions = LateBindingProperty(_get_transactions,None,None, - "List of all transactions.\nItems are :class:`TransactionInfo` objects.") - statements = LateBindingProperty(_get_statements,None,None, - "List of all statements.\nItems are :class:`StatementInfo` objects.") - callstack = LateBindingProperty(_get_callstack,None,None, - "List with complete call stack.\nItems are :class:`CallStackInfo` objects.") - iostats = LateBindingProperty(_get_iostats,None,None, - "List of all I/O statistics.\nItems are :class:`IOStatsInfo` objects.") - variables = LateBindingProperty(_get_variables,None,None, - "List of all context variables.\nItems are :class:`ContextVariableInfo` objects.") + db = LateBindingProperty(_get_database, doc=":class:`DatabaseInfo` object for attached database.") + attachments = LateBindingProperty(_get_attachments, doc=":class:`~fdb.utils.ObjectList` of all attachments.\nItems are :class:`AttachmentInfo` objects.") + this_attachment = LateBindingProperty(_get_this_attachment, doc=":class:`AttachmentInfo` object for current connection.") + transactions = LateBindingProperty(_get_transactions, doc=":class:`~fdb.utils.ObjectList` of all transactions.\nItems are :class:`TransactionInfo` objects.") + statements = LateBindingProperty(_get_statements, doc=":class:`~fdb.utils.ObjectList` of all statements.\nItems are :class:`StatementInfo` objects.") + callstack = LateBindingProperty(_get_callstack, doc=":class:`~fdb.utils.ObjectList` with complete call stack.\nItems are :class:`CallStackInfo` objects.") + iostats = LateBindingProperty(_get_iostats, doc=":class:`~fdb.utils.ObjectList` of all I/O statistics.\nItems are :class:`IOStatsInfo` objects.") + variables = LateBindingProperty(_get_variables, doc=":class:`~fdb.utils.ObjectList` of all context variables.\nItems are :class:`ContextVariableInfo` objects.") # FB 3.0 - tablestats = LateBindingProperty(_get_tablestats,None,None, - "List of all table record I/O statistics.\nItems are :class:`TableStatsInfo` objects.") + tablestats = LateBindingProperty(_get_tablestats, doc=":class:`~fdb.utils.ObjectList` of all table record I/O statistics.\nItems are :class:`TableStatsInfo` objects.") #--- Public @@ -234,7 +231,7 @@ :param connection: :class:`~fdb.Connection` instance. 
- :raises ProgrammingError: If Monitor object was set as internal (via + :raises fdb.ProgrammingError: If Monitor object was set as internal (via :meth:`_set_as_internal`) or database has ODS lower than 11.1. """ if self.__internal: @@ -250,7 +247,7 @@ def close(self): """Sever link to :class:`~fdb.Connection`. - :raises ProgrammingError: If Monitor object was set as internal (via + :raises fdb.ProgrammingError: If Monitor object was set as internal (via :meth:`_set_as_internal`). """ if self.__internal: @@ -276,7 +273,7 @@ self._ic.transaction.commit() self.clear() self._get_database() - def get_attachment(self,id): + def get_attachment(self, id): """Get :class:`AttachmentInfo` by ID. :param int id: Attachment ID. @@ -288,7 +285,7 @@ return attachment else: return None - def get_transaction(self,id): + def get_transaction(self, id): """Get :class:`TransactionInfo` by ID. :param int id: Transaction ID. @@ -300,7 +297,7 @@ return transaction else: return None - def get_statement(self,id): + def get_statement(self, id): """Get :class:`StatementInfo` by ID. :param int id: Statement ID. @@ -312,7 +309,7 @@ return statement else: return None - def get_call(self,id): + def get_call(self, id): """Get :class:`CallStackInfo` by ID. :param int id: Callstack ID. @@ -330,13 +327,13 @@ "Base class for all database monitoring objects." #: Weak reference to parent :class:`Monitor` instance. monitor = None - def __init__(self,monitor,attributes): - self.monitor = monitor if type(monitor) == weakref.ProxyType else weakref.proxy(monitor) + def __init__(self, monitor, attributes): + self.monitor = monitor if isinstance(monitor, weakref.ProxyType) else weakref.proxy(monitor) self._attributes = dict(attributes) #--- protected - def _strip_attribute(self,attr): + def _strip_attribute(self, attr): if self._attributes.get(attr): self._attributes[attr] = self._attributes[attr].strip() @@ -346,12 +343,12 @@ return self._attributes.get('MON$STAT_ID') #--- properties - stat_id = LateBindingProperty(_get_stat_id,None,None,"Internal ID.") + stat_id = LateBindingProperty(_get_stat_id, doc="Internal ID.") class DatabaseInfo(BaseInfoItem): "Information about attached database." 
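The Monitor hunks above replace the plain lists formerly returned by the lazy properties with frozen fdb.utils.ObjectList collections, and the get_attachment/get_transaction/get_statement/get_call lookups search those cached collections. A usage sketch, assuming Monitor's no-argument constructor and the bind() method whose body these hunks show only in part (DSN and credentials are illustrative):

    import fdb
    from fdb.monitor import Monitor

    con = fdb.connect(dsn="localhost:employee", user="sysdba", password="masterkey")
    mon = Monitor()
    mon.bind(con)  # raises fdb.ProgrammingError for ODS < 11.1
    for att in mon.attachments:  # frozen ObjectList of AttachmentInfo items
        print(att.id, att.user, att.state)
    # Lookups return the cached objects, so identity comparisons hold:
    assert mon.get_attachment(mon.this_attachment.id) is mon.this_attachment
    mon.close()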
- def __init__(self,monitor,attributes): - super(DatabaseInfo,self).__init__(monitor,attributes) + def __init__(self, monitor, attributes): + super(DatabaseInfo, self).__init__(monitor, attributes) self._strip_attribute('MON$DATABASE_NAME') self._strip_attribute('MON$OWNER') @@ -406,55 +403,39 @@ def __get_security_database(self): return self._attributes.get('MON$SEC_DATABASE') def __get_tablestats(self): - res = {} - for io in self.monitor.tablestats: - if (io.stat_id == self.stat_id) and (io.group == STAT_DATABASE): - res[io.table_name] = io - return res + return dict(((io.table_name, io) for io in self.monitor.tablestats if (io.stat_id == self.stat_id) and (io.group == STAT_DATABASE))) #--- properties - name = property(__get_name,None,None,"Database pathname or alias.") - page_size = property(__get_page_size,None,None,"Size of database page in bytes.") - ods = property(__get_ods,None,None,"On-Disk Structure (ODS) version number.") - oit = property(__get_oit,None,None, - "Transaction ID of the oldest [interesting] transaction.") - oat = property(__get_oat,None,None,"Transaction ID of the oldest active transaction.") - ost = property(__get_ost,None,None, - "Transaction ID of the Oldest Snapshot, i.e., the number of the OAT " \ - "when the last garbage collection was done.") - next_transaction = property(__get_next_transaction,None,None, - "Transaction ID of the next transaction that will be started.") - cache_size = property(__get_cache_size,None,None, - "Number of pages allocated in the page cache.") - sql_dialect = property(__get_sql_dialect,None,None,"SQL dialect of the database.") - shutdown_mode = property(__get_shutdown_mode,None,None,"Current shutdown mode.") - sweep_interval = property(__get_sweep_interval,None,None, - "The sweep interval configured in the database header. " \ + name = property(__get_name, doc="Database pathname or alias.") + page_size = property(__get_page_size, doc="Size of database page in bytes.") + ods = property(__get_ods, doc="On-Disk Structure (ODS) version number.") + oit = property(__get_oit, doc="Transaction ID of the oldest [interesting] transaction.") + oat = property(__get_oat, doc="Transaction ID of the oldest active transaction.") + ost = property(__get_ost, doc="Transaction ID of the Oldest Snapshot, i.e., the number of the OAT when the last garbage collection was done.") + next_transaction = property(__get_next_transaction, doc="Transaction ID of the next transaction that will be started.") + cache_size = property(__get_cache_size, doc="Number of pages allocated in the page cache.") + sql_dialect = property(__get_sql_dialect, doc="SQL dialect of the database.") + shutdown_mode = property(__get_shutdown_mode, doc="Current shutdown mode.") + sweep_interval = property(__get_sweep_interval, doc="The sweep interval configured in the database header. 
" \ "Value 0 indicates that sweeping is disabled.") - read_only = property(__get_read_only,None,None,"True if database is Read Only.") - forced_writes = property(__get_forced_writes,None,None, - "True if database uses synchronous writes.") - reserve_space = property(__get_reserve_space,None,None, - "True if database reserves space on data pages.") - created = property(__get_created,None,None, - "Creation date and time, i.e., when the database was created or last restored.") - pages = property(__get_pages,None,None,"Number of pages allocated on disk.") - backup_state = property(__get_backup_state,None,None, - "Current state of database with respect to nbackup physical backup.") - iostats = property(__get_iostats,None,None,":class:`IOStatsInfo` for this object.") + read_only = property(__get_read_only, doc="True if database is Read Only.") + forced_writes = property(__get_forced_writes, doc="True if database uses synchronous writes.") + reserve_space = property(__get_reserve_space, doc="True if database reserves space on data pages.") + created = property(__get_created, doc="Creation date and time, i.e., when the database was created or last restored.") + pages = property(__get_pages, doc="Number of pages allocated on disk.") + backup_state = property(__get_backup_state, doc="Current state of database with respect to nbackup physical backup.") + iostats = property(__get_iostats, doc=":class:`IOStatsInfo` for this object.") # FB 3.0 - crypt_page = property(__get_crypt_page,None,None,"Number of page being encrypted.") - owner = property(__get_owner,None,None,"User name of database owner.") - security_database = property(__get_security_database,None,None, - "TYpe of security database (Default, Self or Other).") - tablestats = property(__get_tablestats,None,None, - "Dictionary of :class:`TableStatsInfo` instances for this object.") + crypt_page = property(__get_crypt_page, doc="Number of page being encrypted.") + owner = property(__get_owner, doc="User name of database owner.") + security_database = property(__get_security_database, doc="Type of security database (Default, Self or Other).") + tablestats = property(__get_tablestats, doc="Dictionary of :class:`TableStatsInfo` instances for this object.") class AttachmentInfo(BaseInfoItem): "Information about attachment (connection) to database." 
- def __init__(self,monitor,attributes): - super(AttachmentInfo,self).__init__(monitor,attributes) + def __init__(self, monitor, attributes): + super(AttachmentInfo, self).__init__(monitor, attributes) self._strip_attribute('MON$ATTACHMENT_NAME') self._strip_attribute('MON$USER') @@ -495,14 +476,11 @@ def __get_timestamp(self): return self._attributes['MON$TIMESTAMP'] def _get_transactions(self): - return [t for t in self.monitor.transactions - if t._attributes['MON$ATTACHMENT_ID'] == self.id] + return self.monitor.transactions.filter(lambda s: s._attributes['MON$ATTACHMENT_ID'] == self.id) def _get_statements(self): - return [s for s in self.monitor.statements - if s._attributes['MON$ATTACHMENT_ID'] == self.id] + return self.monitor.statements.filter(lambda s: s._attributes['MON$ATTACHMENT_ID'] == self.id) def _get_variables(self): - return [s for s in self.monitor.variables - if s._attributes['MON$ATTACHMENT_ID'] == self.id] + return self.monitor.variables.filter(lambda s: s._attributes['MON$ATTACHMENT_ID'] == self.id) def __get_iostats(self): for io in self.monitor.iostats: if (io.stat_id == self.stat_id) and (io.group == STAT_ATTACHMENT): @@ -518,43 +496,37 @@ return self._attributes.get('MON$REMOTE_OS_USER') def __get_remote_host(self): return self._attributes.get('MON$REMOTE_HOST') + def __get_system(self): + return bool(self._attributes.get('MON$SYSTEM_FLAG')) def __get_tablestats(self): - res = {} - for io in self.monitor.tablestats: - if (io.stat_id == self.stat_id) and (io.group == STAT_ATTACHMENT): - res[io.table_name] = io - return res + return dict(((io.table_name, io) for io in self.monitor.tablestats if (io.stat_id == self.stat_id) and (io.group == STAT_ATTACHMENT))) #--- properties - id = property(__get_id,None,None,"Attachment ID.") - server_pid = property(__get_server_pid,None,None,"Server process ID.") - state = property(__get_state,None,None,"Attachment state (idle/active).") - name = property(__get_name,None,None,"Database pathname or alias.") - user = property(__get_user,None,None,"User name.") - role = property(__get_role,None,None,"Role name.") - remote_protocol = property(__get_remote_protocol,None,None,"Remote protocol name.") - remote_address = property(__get_remote_address,None,None,"Remote address.") - remote_pid = property(__get_remote_pid,None,None,"Remote client process ID.") - remote_process = property(__get_remote_process,None,None,"Remote client process pathname.") - character_set = property(__get_character_set,None,None, - ":class:`~fdb.schema.CharacterSet` for this attachment.") - timestamp = property(__get_timestamp,None,None,"Attachment date/time.") - transactions = LateBindingProperty(_get_transactions,None,None, - "List of transactions associated with attachment.\nItems are :class:`TransactionInfo` objects.") - statements = LateBindingProperty(_get_statements,None,None, - "List of statements associated with attachment.\nItems are :class:`StatementInfo` objects.") - variables = LateBindingProperty(_get_variables,None,None, - "List of variables associated with attachment.\nItems are :class:`ContextVariableInfo` objects.") - iostats = property(__get_iostats,None,None,":class:`IOStatsInfo` for this object.") + id = property(__get_id, doc="Attachment ID.") + server_pid = property(__get_server_pid, doc="Server process ID.") + state = property(__get_state, doc="Attachment state (idle/active).") + name = property(__get_name, doc="Database pathname or alias.") + user = property(__get_user, doc="User name.") + role = property(__get_role, doc="Role name.") + 
remote_protocol = property(__get_remote_protocol, doc="Remote protocol name.") + remote_address = property(__get_remote_address, doc="Remote address.") + remote_pid = property(__get_remote_pid, doc="Remote client process ID.") + remote_process = property(__get_remote_process, doc="Remote client process pathname.") + character_set = property(__get_character_set, doc=":class:`~fdb.schema.CharacterSet` for this attachment.") + timestamp = property(__get_timestamp, doc="Attachment date/time.") + transactions = LateBindingProperty(_get_transactions, doc=":class:`~fdb.utils.ObjectList` of transactions associated with attachment.\nItems are :class:`TransactionInfo` objects.") + statements = LateBindingProperty(_get_statements, doc=":class:`~fdb.utils.ObjectList` of statements associated with attachment.\nItems are :class:`StatementInfo` objects.") + variables = LateBindingProperty(_get_variables, doc=":class:`~fdb.utils.ObjectList` of variables associated with attachment.\nItems are :class:`ContextVariableInfo` objects.") + iostats = property(__get_iostats, doc=":class:`IOStatsInfo` for this object.") # FB 3.0 - auth_method = property(__get_auth_method,None,None,"Authentication method.") - client_version = property(__get_client_version,None,None,"Client library version.") - remote_version = property(__get_remote_version,None,None,"Remote protocol version.") - remote_os_user = property(__get_remote_os_user,None,None,"OS user name of client process.") - remote_host = property(__get_remote_host,None,None,"Name of remote host.") - tablestats = property(__get_tablestats,None,None, - "Dictionary of :class:`TableStatsInfo` instances for this object.") + auth_method = property(__get_auth_method, doc="Authentication method.") + client_version = property(__get_client_version, doc="Client library version.") + remote_version = property(__get_remote_version, doc="Remote protocol version.") + remote_os_user = property(__get_remote_os_user, doc="OS user name of client process.") + remote_host = property(__get_remote_host, doc="Name of remote host.") + system = property(__get_system, None, None, "True for system attachments.") + tablestats = property(__get_tablestats, doc="Dictionary of :class:`TableStatsInfo` instances for this object.") #--- Public @@ -573,7 +545,7 @@ def terminate(self): """Terminates client session associated with this attachment. - :raises ProgrammingError: If database has ODS lower than 11.2 or + :raises fdb.ProgrammingError: If database has ODS lower than 11.2 or this attachment is the current session. """ if self.monitor._con.ods < fdb.ODS_FB_25: @@ -588,8 +560,8 @@ class TransactionInfo(BaseInfoItem): "Information about transaction."
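The AttachmentInfo.terminate() hunk just above refuses to run against servers below ODS 11.2 or against the current session, and this release also adds the system property flagging server-internal attachments. A hedged sketch combining the two to disconnect every other user attachment (reusing mon from the previous sketch):

    for att in mon.attachments:
        # Skip server-internal attachments and our own session; terminate()
        # raises fdb.ProgrammingError when aimed at the current session.
        if not att.system and att is not mon.this_attachment:
            att.terminate()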
- def __init__(self,monitor,attributes): - super(TransactionInfo,self).__init__(monitor,attributes) + def __init__(self, monitor, attributes): + super(TransactionInfo, self).__init__(monitor, attributes) #--- Protected @@ -612,43 +584,33 @@ def __get_lock_timeout(self): return self._attributes['MON$LOCK_TIMEOUT'] def _get_statements(self): - return [s for s in self.monitor.statements - if s._attributes['MON$TRANSACTION_ID'] == self.id] + return self.monitor.statements.filter(lambda s: s._attributes['MON$TRANSACTION_ID'] == self.id) def _get_variables(self): - return [s for s in self.monitor.variables - if s._attributes['MON$TRANSACTION_ID'] == self.id] + return self.monitor.variables.filter(lambda s: s._attributes['MON$TRANSACTION_ID'] == self.id) def __get_iostats(self): for io in self.monitor.iostats: if (io.stat_id == self.stat_id) and (io.group == STAT_TRANSACTION): return io return None def __get_tablestats(self): - res = {} - for io in self.monitor.tablestats: - if (io.stat_id == self.stat_id) and (io.group == STAT_TRANSACTION): - res[io.table_name] = io - return res + return dict(((io.table_name, io) for io in self.monitor.tablestats if (io.stat_id == self.stat_id) and (io.group == STAT_TRANSACTION))) #--- properties - id = property(__get_id,None,None,"Transaction ID.") - attachment = property(__get_attachment,None,None, - ":class:`AttachmentInfo` instance to which this transaction belongs.") - state = property(__get_state,None,None,"Transaction state (idle/active).") - timestamp = property(__get_timestamp,None,None,"Transaction start date/time.") - top = property(__get_top,None,None,"Top transaction.") - oldest = property(__get_oldest,None,None,"Oldest transaction (local OIT).") - oldest_active = property(__get_oldest_active,None,None,"Oldest active transaction (local OAT).") - isolation_mode = property(__get_isolation_mode,None,None,"Transaction isolation mode code.") - lock_timeout = property(__get_lock_timeout,None,None,"Lock timeout.") - statements = LateBindingProperty(_get_statements,None,None, - "List of statements associated with transaction.\nItems are :class:`StatementInfo` objects.") - variables = LateBindingProperty(_get_variables,None,None, - "List of variables associated with transaction.\nItems are :class:`ContextVariableInfo` objects.") - iostats = property(__get_iostats,None,None,":class:`IOStatsInfo` for this object.") + id = property(__get_id, doc="Transaction ID.") + attachment = property(__get_attachment, doc=":class:`AttachmentInfo` instance to which this transaction belongs.") + state = property(__get_state, doc="Transaction state (idle/active).") + timestamp = property(__get_timestamp, doc="Transaction start date/time.") + top = property(__get_top, doc="Top transaction.") + oldest = property(__get_oldest, doc="Oldest transaction (local OIT).") + oldest_active = property(__get_oldest_active, doc="Oldest active transaction (local OAT).") + isolation_mode = property(__get_isolation_mode, doc="Transaction isolation mode code.") + lock_timeout = property(__get_lock_timeout, doc="Lock timeout.") + statements = LateBindingProperty(_get_statements, doc=":class:`~fdb.utils.ObjectList` of statements associated with transaction.\nItems are :class:`StatementInfo` objects.") + variables = LateBindingProperty(_get_variables, doc=":class:`~fdb.utils.ObjectList` of variables associated with transaction.\nItems are :class:`ContextVariableInfo` objects.") + iostats = property(__get_iostats, doc=":class:`IOStatsInfo` for this object.") # FB 3.0 - tablestats = 
property(__get_tablestats,None,None, - "Dictionary of :class:`TableStatsInfo` instances for this object.") + tablestats = property(__get_tablestats, doc="Dictionary of :class:`TableStatsInfo` instances for this object.") #--- Public @@ -673,8 +635,8 @@ class StatementInfo(BaseInfoItem): "Information about executed SQL statement." - def __init__(self,monitor,attributes): - super(StatementInfo,self).__init__(monitor,attributes) + def __init__(self, monitor, attributes): + super(StatementInfo, self).__init__(monitor, attributes) self._strip_attribute('MON$SQL_TEXT') self._strip_attribute('MON$EXPLAINED_PLAN') @@ -695,9 +657,8 @@ def __get_sql_text(self): return self._attributes['MON$SQL_TEXT'] def __get_callstack(self): - callstack = [x for x in self.monitor.callstack - if ((x._attributes['MON$STATEMENT_ID'] == self.id) and - (x._attributes['MON$CALLER_ID'] is None))] + callstack = self.monitor.callstack.filter(lambda x: ((x._attributes['MON$STATEMENT_ID'] == self.id) and + (x._attributes['MON$CALLER_ID'] is None))) if len(callstack) > 0: item = callstack[0] while item is not None: @@ -717,29 +678,21 @@ def __get_plan(self): return self._attributes.get('MON$EXPLAINED_PLAN') def __get_tablestats(self): - res = {} - for io in self.monitor.tablestats: - if (io.stat_id == self.stat_id) and (io.group == STAT_STATEMENT): - res[io.table_name] = io - return res + return dict(((io.table_name, io) for io in self.monitor.tablestats if (io.stat_id == self.stat_id) and (io.group == STAT_STATEMENT))) #--- properties - id = property(__get_id,None,None,"Statement ID.") - attachment = property(__get_attachment,None,None, - ":class:`AttachmentInfo` instance to which this statement belongs.") - transaction = property(__get_transaction,None,None, - ":class:`TransactionInfo` instance to which this statement belongs or None.") - state = property(__get_state,None,None,"Statement state (idle/active).") - timestamp = property(__get_timestamp,None,None,"Statement start date/time.") - sql_text = property(__get_sql_text,None,None,"Statement text, if appropriate.") - callstack = property(__get_callstack,None,None, - "List with call stack for statement.\nItems are :class:`CallStackInfo` objects.") - iostats = property(__get_iostats,None,None,":class:`IOStatsInfo` for this object.") + id = property(__get_id, doc="Statement ID.") + attachment = property(__get_attachment, doc=":class:`AttachmentInfo` instance to which this statement belongs.") + transaction = property(__get_transaction, doc=":class:`TransactionInfo` instance to which this statement belongs or None.") + state = property(__get_state, doc="Statement state (idle/active).") + timestamp = property(__get_timestamp, doc="Statement start date/time.") + sql_text = property(__get_sql_text, doc="Statement text, if appropriate.") + callstack = property(__get_callstack, doc=":class:`~fdb.utils.ObjectList` with call stack for statement.\nItems are :class:`CallStackInfo` objects.") + iostats = property(__get_iostats, doc=":class:`IOStatsInfo` for this object.") # FB 3.0 - plan = property(__get_plan,None,None,"Explained execution plan.") - tablestats = property(__get_tablestats,None,None, - "Dictionary of :class:`TableStatsInfo` instances for this object.") + plan = property(__get_plan, doc="Explained execution plan.") + tablestats = property(__get_tablestats, doc="Dictionary of :class:`TableStatsInfo` instances for this object.") #--- Public @@ -752,7 +705,7 @@ def terminate(self): """Terminates execution of statement. 
- :raises ProgrammingError: If this attachement is current session. + :raises fdb.ProgrammingError: If this attachment is the current session. """ if self.attachment == self.monitor.this_attachment: raise fdb.ProgrammingError("Can't terminate statement from current session.") @@ -762,8 +715,8 @@ class CallStackInfo(BaseInfoItem): "Information about PSQL call (stack frame)." - def __init__(self,monitor,attributes): - super(CallStackInfo,self).__init__(monitor,attributes) + def __init__(self, monitor, attributes): + super(CallStackInfo, self).__init__(monitor, attributes) self._strip_attribute('MON$OBJECT_NAME') self._strip_attribute('MON$PACKAGE_NAME') @@ -801,26 +754,23 @@ #--- properties - id = property(__get_id,None,None,"Call ID.") - statement = property(__get_statement,None,None, - "Top-level :class:`StatementInfo` instance to which this call stack entry belongs.") - caller = property(__get_caller,None,None, - "Call stack entry (:class:`CallStackInfo`) of the caller.") - dbobject = property(__get_dbobject,None,None, - "PSQL object. :class:`~fdb.schema.Procedure` or :class:`~fdb.schema.Trigger` instance.") - timestamp = property(__get_timestamp,None,None,"Request start date/time.") - line = property(__get_line,None,None,"SQL source line number.") - column = property(__get_column,None,None,"SQL source column number.") - iostats = property(__get_iostats,None,None,":class:`IOStatsInfo` for this object.") + id = property(__get_id, doc="Call ID.") + statement = property(__get_statement, doc="Top-level :class:`StatementInfo` instance to which this call stack entry belongs.") + caller = property(__get_caller, doc="Call stack entry (:class:`CallStackInfo`) of the caller.") + dbobject = property(__get_dbobject, doc="PSQL object. :class:`~fdb.schema.Procedure` or :class:`~fdb.schema.Trigger` instance.") + timestamp = property(__get_timestamp, doc="Request start date/time.") + line = property(__get_line, doc="SQL source line number.") + column = property(__get_column, doc="SQL source column number.") + iostats = property(__get_iostats, doc=":class:`IOStatsInfo` for this object.") # FB 3.0 - package_name = property(__get_package_name,None,None,"Package name.") + package_name = property(__get_package_name, doc="Package name.") #--- Public class IOStatsInfo(BaseInfoItem): "Information about page and row level I/O operations, and about memory consumption." - def __init__(self,monitor,attributes): - super(IOStatsInfo,self).__init__(monitor,attributes) + def __init__(self, monitor, attributes): + super(IOStatsInfo, self).__init__(monitor, attributes) #--- Protected @@ -893,52 +843,44 @@ #--- properties - owner = property(__get_owner,None,None, - """Object that owns this IOStats instance. Could be either + owner = property(__get_owner, doc="""Object that owns this IOStats instance.
Could be either :class:`DatabaseInfo`, :class:`AttachmentInfo`, :class:`TransactionInfo`, :class:`StatementInfo` or :class:`CallStackInfo` instance.""") - group = property(__get_group,None,None,"Object group code.") - reads = property(__get_reads,None,None,"Number of page reads.") - writes = property(__get_writes,None,None,"Number of page writes.") - fetches = property(__get_fetches,None,None,"Number of page fetches.") - marks = property(__get_marks,None,None,"Number of pages with changes pending.") - seq_reads = property(__get_seq_reads,None,None,"Number of records read sequentially.") - idx_reads = property(__get_idx_reads,None,None,"Number of records read via an index.") - inserts = property(__get_inserts,None,None,"Number of inserted records.") - updates = property(__get_updates,None,None,"Number of updated records.") - deletes = property(__get_deletes,None,None,"Number of deleted records.") - backouts = property(__get_backouts,None,None, - "Number of records where a new primary record version or a change to " \ + group = property(__get_group, doc="Object group code.") + reads = property(__get_reads, doc="Number of page reads.") + writes = property(__get_writes, doc="Number of page writes.") + fetches = property(__get_fetches, doc="Number of page fetches.") + marks = property(__get_marks, doc="Number of pages with changes pending.") + seq_reads = property(__get_seq_reads, doc="Number of records read sequentially.") + idx_reads = property(__get_idx_reads, doc="Number of records read via an index.") + inserts = property(__get_inserts, doc="Number of inserted records.") + updates = property(__get_updates, doc="Number of updated records.") + deletes = property(__get_deletes, doc="Number of deleted records.") + backouts = property(__get_backouts, doc="Number of records where a new primary record version or a change to " \ "an existing primary record version is backed out due to rollback or " \ "savepoint undo.") - purges = property(__get_purges,None,None, - "Number of records where record version chain is being purged of " \ + purges = property(__get_purges, doc="Number of records where record version chain is being purged of " \ "versions no longer needed by OAT or younger transactions.") - expunges = property(__get_expunges,None,None, - "Number of records where record version chain is being deleted due to " \ + expunges = property(__get_expunges, doc="Number of records where record version chain is being deleted due to " \ "deletions by transactions older than OAT.") - memory_used = property(__get_memory_used,None,None,"Number of bytes currently in use.") - memory_allocated = property(__get_memory_allocated,None,None, - "Number of bytes currently allocated at the OS level.") - max_memory_used = property(__get_max_memory_used,None,None, - "Maximum number of bytes used by this object.") - max_memory_allocated = property(__get_max_memory_allocated,None,None, - "Maximum number of bytes allocated from the operating system by this object.") + memory_used = property(__get_memory_used, doc="Number of bytes currently in use.") + memory_allocated = property(__get_memory_allocated, doc="Number of bytes currently allocated at the OS level.") + max_memory_used = property(__get_max_memory_used, doc="Maximum number of bytes used by this object.") + max_memory_allocated = property(__get_max_memory_allocated, doc="Maximum number of bytes allocated from the operating system by this object.") # FB 3.0 - locks = property(__get_locks,None,None,"Number of record locks.") - waits = 
property(__get_waits,None,None,"Number of record waits.") - conflits = property(__get_conflits,None,None,"Number of record conflits.") - backversion_reads = property(__get_backversion_reads,None,None, - "Number of record backversion reads.") - fragment_reads = property(__get_fragment_reads,None,None,"Number of record fragment reads.") - repeated_reads = property(__get_repeated_reads,None,None,"Number of repeated record reads.") + locks = property(__get_locks, doc="Number of record locks.") + waits = property(__get_waits, doc="Number of record waits.") + conflits = property(__get_conflits, doc="Number of record conflits.") + backversion_reads = property(__get_backversion_reads, doc="Number of record backversion reads.") + fragment_reads = property(__get_fragment_reads, doc="Number of record fragment reads.") + repeated_reads = property(__get_repeated_reads, doc="Number of repeated record reads.") #--- Public class TableStatsInfo(BaseInfoItem): "Information about row level I/O operations on single table." - def __init__(self,monitor,attributes): - super(TableStatsInfo,self).__init__(monitor,attributes) + def __init__(self, monitor, attributes): + super(TableStatsInfo, self).__init__(monitor, attributes) self._strip_attribute('MON$TABLE_NAME') #--- Protected @@ -1000,42 +942,36 @@ #--- properties - owner = property(__get_owner,None,None, - """Object that owns this TableStats instance. Could be either + owner = property(__get_owner, doc="""Object that owns this TableStats instance. Could be either :class:`DatabaseInfo`, :class:`AttachmentInfo`, :class:`TransactionInfo`, :class:`StatementInfo` or :class:`CallStackInfo` instance.""") - row_stat_id = property(__get_row_stat_id,None,None,"Internal ID.") - table_name = property(__get_table_name,None,None,"Table name.") - group = property(__get_group,None,None,"Object group code.") - seq_reads = property(__get_seq_reads,None,None,"Number of records read sequentially.") - idx_reads = property(__get_idx_reads,None,None,"Number of records read via an index.") - inserts = property(__get_inserts,None,None,"Number of inserted records.") - updates = property(__get_updates,None,None,"Number of updated records.") - deletes = property(__get_deletes,None,None,"Number of deleted records.") - backouts = property(__get_backouts,None,None, - "Number of records where a new primary record version or a change to " \ + row_stat_id = property(__get_row_stat_id, doc="Internal ID.") + table_name = property(__get_table_name, doc="Table name.") + group = property(__get_group, doc="Object group code.") + seq_reads = property(__get_seq_reads, doc="Number of records read sequentially.") + idx_reads = property(__get_idx_reads, doc="Number of records read via an index.") + inserts = property(__get_inserts, doc="Number of inserted records.") + updates = property(__get_updates, doc="Number of updated records.") + deletes = property(__get_deletes, doc="Number of deleted records.") + backouts = property(__get_backouts, doc="Number of records where a new primary record version or a change to " \ "an existing primary record version is backed out due to rollback or " \ "savepoint undo.") - purges = property(__get_purges,None,None, - "Number of records where record version chain is being purged of " \ + purges = property(__get_purges, doc="Number of records where record version chain is being purged of " \ "versions no longer needed by OAT or younger transactions.") - expunges = property(__get_expunges,None,None, - "Number of records where record version chain is being deleted due to " 
\ - "deletions by transactions older than OAT.") - locks = property(__get_locks,None,None,"Number of record locks.") - waits = property(__get_waits,None,None,"Number of record waits.") - conflits = property(__get_conflits,None,None,"Number of record conflits.") - backversion_reads = property(__get_backversion_reads,None,None, - "Number of record backversion reads.") - fragment_reads = property(__get_fragment_reads,None,None,"Number of record fragment reads.") - repeated_reads = property(__get_repeated_reads,None,None,"Number of repeated record reads.") + expunges = property(__get_expunges, doc="Number of records where record version chain is being deleted due to deletions by transactions older than OAT.") + locks = property(__get_locks, doc="Number of record locks.") + waits = property(__get_waits, doc="Number of record waits.") + conflits = property(__get_conflits, doc="Number of record conflits.") + backversion_reads = property(__get_backversion_reads, doc="Number of record backversion reads.") + fragment_reads = property(__get_fragment_reads, doc="Number of record fragment reads.") + repeated_reads = property(__get_repeated_reads, doc="Number of repeated record reads.") #--- Public class ContextVariableInfo(BaseInfoItem): "Information about context variable." - def __init__(self,monitor,attributes): - super(ContextVariableInfo,self).__init__(monitor,attributes) + def __init__(self, monitor, attributes): + super(ContextVariableInfo, self).__init__(monitor, attributes) self._strip_attribute('MON$VARIABLE_NAME') self._strip_attribute('MON$VARIABLE_VALUE') @@ -1054,12 +990,10 @@ #--- properties - attachment = property(__get_attachment,None,None, - ":class:`AttachmentInfo` instance to which this context variable belongs or None.") - transaction = property(__get_transaction,None,None, - ":class:`TransactionInfo` instance to which this context variable belongs or None.") - name = property(__get_name,None,None,"Context variable name.") - value = property(__get_value,None,None,"Value of context variable.") + attachment = property(__get_attachment, doc=":class:`AttachmentInfo` instance to which this context variable belongs or None.") + transaction = property(__get_transaction, doc=":class:`TransactionInfo` instance to which this context variable belongs or None.") + name = property(__get_name, doc="Context variable name.") + value = property(__get_value, doc="Value of context variable.") #--- Public diff -Nru fdb-1.6.1+dfsg1/fdb/schema.py fdb-2.0.0/fdb/schema.py --- fdb-1.6.1+dfsg1/fdb/schema.py 2016-01-13 11:05:54.000000000 +0000 +++ fdb-2.0.0/fdb/schema.py 2018-04-26 14:39:03.000000000 +0000 @@ -2,7 +2,7 @@ # # PROGRAM: fdb # MODULE: schema.py -# DESCRIPTION: Database schema +# DESCRIPTION: Python driver for Firebird - Database schema # CREATED: 10.5.2013 # # Software distributed under the License is distributed AS IS, @@ -12,23 +12,22 @@ # # The Original Code was created by Pavel Cisar # -# Copyright (c) 2013 Pavel Cisar +# Copyright (c) Pavel Cisar # and all contributors signed below. # # All Rights Reserved. # Contributor(s): ______________________________________. +# +# See LICENSE.TXT for details. -import sys -import os import fdb -#from . 
import fbcore as fdb -from fdb.utils import LateBindingProperty +from fdb.utils import LateBindingProperty, ObjectList, Visitable import string import weakref from itertools import groupby +import collections -# Firebird Field Types - +# Firebird field type codes FBT_SMALLINT = 7 FBT_INTEGER = 8 FBT_QUAD = 9 @@ -48,13 +47,14 @@ MAX_INTSUBTYPES = 2 MAX_BLOBSUBTYPES = 8 - +# Trigger masks TRIGGER_TYPE_SHIFT = 13 TRIGGER_TYPE_MASK = (0x3 << TRIGGER_TYPE_SHIFT) TRIGGER_TYPE_DML = (0 << TRIGGER_TYPE_SHIFT) TRIGGER_TYPE_DB = (1 << TRIGGER_TYPE_SHIFT) TRIGGER_TYPE_DDL = (2 << TRIGGER_TYPE_SHIFT) +# Trigger type codes DDL_TRIGGER_ANY = 4611686018427375615 # 9223372036854751229 DDL_TRIGGER_CREATE_TABLE = 1 DDL_TRIGGER_ALTER_TABLE = 2 @@ -102,165 +102,231 @@ DDL_TRIGGER_ALTER_MAPPING = 46 DDL_TRIGGER_DROP_MAPPING = 47 - -COLUMN_TYPES = {None: 'UNKNOWN', - FBT_SMALLINT: 'SMALLINT', - FBT_INTEGER: 'INTEGER', - FBT_QUAD: 'QUAD', - FBT_FLOAT: 'FLOAT', - FBT_CHAR: 'CHAR', - FBT_DOUBLE_PRECISION: 'DOUBLE PRECISION', - FBT_VARCHAR: 'VARCHAR', - FBT_CSTRING: 'CSTRING', - FBT_BLOB_ID: 'BLOB_ID', - FBT_BLOB: 'BLOB', - FBT_SQL_TIME: 'TIME', - FBT_SQL_DATE: 'DATE', - FBT_SQL_TIMESTAMP: 'TIMESTAMP', - FBT_BIGINT: 'BIGINT', - FBT_BOOLEAN: 'BOOLEAN', - } -INTEGRAL_SUBTYPES = ('UNKNOWN','NUMERIC','DECIMAL') -BLOB_SUBTYPES = ('BINARY','TEXT','BLR','ACL','RANGES','SUMMARY', - 'FORMAT','TRANSACTION_DESCRIPTION','EXTERNAL_FILE_DESCRIPTION', +# Lists and dictionary maps COLUMN_TYPES = {None: 'UNKNOWN', FBT_SMALLINT: 'SMALLINT', FBT_INTEGER: 'INTEGER', + FBT_QUAD: 'QUAD', FBT_FLOAT: 'FLOAT', FBT_CHAR: 'CHAR', + FBT_DOUBLE_PRECISION: 'DOUBLE PRECISION', FBT_VARCHAR: 'VARCHAR', + FBT_CSTRING: 'CSTRING', FBT_BLOB_ID: 'BLOB_ID', FBT_BLOB: 'BLOB', + FBT_SQL_TIME: 'TIME', FBT_SQL_DATE: 'DATE', FBT_SQL_TIMESTAMP: 'TIMESTAMP', + FBT_BIGINT: 'BIGINT', FBT_BOOLEAN: 'BOOLEAN'} +INTEGRAL_SUBTYPES = ('UNKNOWN', 'NUMERIC', 'DECIMAL') +BLOB_SUBTYPES = ('BINARY', 'TEXT', 'BLR', 'ACL', 'RANGES', 'SUMMARY', + 'FORMAT', 'TRANSACTION_DESCRIPTION', 'EXTERNAL_FILE_DESCRIPTION', 'DEBUG_INFORMATION') -TRIGGER_PREFIX_TYPES = ['BEFORE','AFTER'] -TRIGGER_SUFFIX_TYPES = ['','INSERT','UPDATE','DELETE'] -TRIGGER_DB_TYPES = ['CONNECT','DISCONNECT','TRANSACTION START', - 'TRANSACTION COMMIT','TRANSACTION ROLLBACK'] -TRIGGER_DDL_TYPES = [None,"CREATE TABLE","ALTER TABLE","DROP TABLE", - "CREATE PROCEDURE","ALTER PROCEDURE","DROP PROCEDURE", - "CREATE FUNCTION","ALTER FUNCTION","DROP FUNCTION", - "CREATE TRIGGER","ALTER TRIGGER","DROP TRIGGER", - None,None,None, # gap for TRIGGER_TYPE_MASK - 3 bits - "CREATE EXCEPTION","ALTER EXCEPTION","DROP EXCEPTION", - "CREATE VIEW","ALTER VIEW","DROP VIEW", - "CREATE DOMAIN","ALTER DOMAIN","DROP DOMAIN", - "CREATE ROLE","ALTER ROLE","DROP ROLE", - "CREATE INDEX","ALTER INDEX","DROP INDEX", - "CREATE SEQUENCE","ALTER SEQUENCE","DROP SEQUENCE", - "CREATE USER","ALTER USER","DROP USER", - "CREATE COLLATION","DROP COLLATION","ALTER CHARACTER SET", - "CREATE PACKAGE","ALTER PACKAGE","DROP PACKAGE", - "CREATE PACKAGE BODY","DROP PACKAGE BODY", - "CREATE MAPPING","ALTER MAPPING","DROP MAPPING"] +TRIGGER_PREFIX_TYPES = ['BEFORE', 'AFTER'] +TRIGGER_SUFFIX_TYPES = ['', 'INSERT', 'UPDATE', 'DELETE'] +TRIGGER_DB_TYPES = ['CONNECT', 'DISCONNECT', 'TRANSACTION START', + 'TRANSACTION COMMIT', 'TRANSACTION ROLLBACK'] +TRIGGER_DDL_TYPES = [None, "CREATE TABLE", "ALTER TABLE", "DROP TABLE", + "CREATE PROCEDURE", "ALTER PROCEDURE", "DROP PROCEDURE", + "CREATE FUNCTION", "ALTER FUNCTION", "DROP FUNCTION", + "CREATE TRIGGER", "ALTER
TRIGGER", "DROP TRIGGER", + None, None, None, # gap for TRIGGER_TYPE_MASK - 3 bits + "CREATE EXCEPTION", "ALTER EXCEPTION", "DROP EXCEPTION", + "CREATE VIEW", "ALTER VIEW", "DROP VIEW", + "CREATE DOMAIN", "ALTER DOMAIN", "DROP DOMAIN", + "CREATE ROLE", "ALTER ROLE", "DROP ROLE", + "CREATE INDEX", "ALTER INDEX", "DROP INDEX", + "CREATE SEQUENCE", "ALTER SEQUENCE", "DROP SEQUENCE", + "CREATE USER", "ALTER USER", "DROP USER", + "CREATE COLLATION", "DROP COLLATION", "ALTER CHARACTER SET", + "CREATE PACKAGE", "ALTER PACKAGE", "DROP PACKAGE", + "CREATE PACKAGE BODY", "DROP PACKAGE BODY", + "CREATE MAPPING", "ALTER MAPPING", "DROP MAPPING"] + +# Collation parameters codes COLLATION_PAD_SPACE = 1 COLLATION_CASE_INSENSITIVE = 2 COLLATION_ACCENT_INSENSITIVE = 4 -INDEX_TYPE_ASCENDING = 'ASCENDING' +# Index type names +INDEX_TYPE_ASCENDING = 'ASCENDING' INDEX_TYPE_DESCENDING = 'DESCENDING' -INDEX_TYPES = [INDEX_TYPE_ASCENDING,INDEX_TYPE_DESCENDING] +INDEX_TYPES = [INDEX_TYPE_ASCENDING, INDEX_TYPE_DESCENDING] +# Relation type codes RELATION_TYPE_TABLE = 0 RELATION_TYPE_VIEW = 1 RELATION_TYPE_GTT = 5 RELATION_TYPE_GTT_PRESERVE = 4 RELATION_TYPE_GTT_DELETE = 5 +# Procedure parameter type codes PROCPAR_DATATYPE = 0 PROCPAR_DOMAIN = 1 PROCPAR_TYPE_OF_DOMAIN = 2 PROCPAR_TYPE_OF_COLUMN = 3 -RESERVED = ['ACTIVE','ADD','ADMIN','AFTER','ALL','ALTER','AND', - 'ANY','ARE','AS','ASC','ASCENDING','AT','AUTO','AUTODDL','AVG', - 'BASED','BASE_NAME','BEFORE','BEGIN','BETWEEN','BIGINT','BIT_LENGTH', - 'BLOB','BLOBEDIT','BOTH','BUFFER','BY','BOOLEAN', - 'CASE','CAST','CHAR','CHARACTER','CHAR_LENGTH','CHARACTER_LENGTH', - 'CHECK','CHECK_POINT_LENGTH','CLOSE','COALESCE','COLLATE','COLLATION', - 'COLUMN','COMMIT','COMMITTED','COMPILETIME','COMPUTED','CONDITIONAL', - 'CONNECT','CONSTRAINT','CONTAINING','CONTINUE','COUNT','CREATE','CROSS', - 'CSTRING','CURRENT','CURRENT_CONNECTION','CURRENT_DATE','CURRENT_ROLE', - 'CURRENT_TIME','CURRENT_TIMESTAMP','CURRENT_TRANSACTION','CURRENT_USER', - 'CORR','COVAR_POP','COVAR_SAMP', - 'DATABASE','DATE','DAY','DB_KEY','DEBUG','DEC','DECIMAL','DECLARE','DEFAULT', - 'DELETE','DELETING','DESC','DESCENDING','DESCRIBE','DISCONNECT', - 'DISPLAY','DISTINCT','DO','DOMAIN','DOUBLE','DROP','DETERMINISTIC', - 'ECHO','EDIT','ELSE','END','ENTRY_POINT','ESCAPE','EVENT','EXCEPTION','EXECUTE', - 'EXISTS','EXIT','EXTERN','EXTERNAL','EXTRACT', - 'FETCH','FILE','FILTER','FLOAT','FOR','FOREIGN','FOUND','FROM','FULL','FUNCTION', - 'FALSE', - 'GDSCODE','GENERATOR','GEN_ID','GOTO','GRANT','GROUP','GROUP_COMMIT_WAIT_TIME', +# Section codes for Schema.get_metadata_ddl() +SCRIPT_COLLATIONS = 1 +SCRIPT_CHARACTER_SETS = 2 +SCRIPT_UDFS = 3 +SCRIPT_GENERATORS = 4 +SCRIPT_EXCEPTIONS = 5 +SCRIPT_DOMAINS = 6 +SCRIPT_PACKAGE_DEFS = 7 +SCRIPT_FUNCTION_DEFS = 8 +SCRIPT_PROCEDURE_DEFS = 9 +SCRIPT_TABLES = 10 +SCRIPT_PRIMARY_KEYS = 11 +SCRIPT_UNIQUE_CONSTRAINTS = 12 +SCRIPT_CHECK_CONSTRAINTS = 13 +SCRIPT_FOREIGN_CONSTRAINTS = 14 +SCRIPT_INDICES = 15 +SCRIPT_VIEWS = 16 +SCRIPT_PACKAGE_BODIES = 17 +SCRIPT_PROCEDURE_BODIES = 18 +SCRIPT_FUNCTION_BODIES = 19 +SCRIPT_TRIGGERS = 20 +SCRIPT_ROLES = 21 +SCRIPT_GRANTS = 22 +SCRIPT_COMMENTS = 23 +SCRIPT_SHADOWS = 24 +SCRIPT_SET_GENERATORS = 25 +SCRIPT_INDEX_DEACTIVATIONS = 26 +SCRIPT_INDEX_ACTIVATIONS = 27 +SCRIPT_TRIGGER_DEACTIVATIONS = 28 +SCRIPT_TRIGGER_ACTIVATIONS = 29 + +# Schema information collection codes +SCHEMA_TABLES = 1 +SCHEMA_VIEWS = 2 +SCHEMA_DOMAINS = 3 +SCHEMA_INDICES = 4 +SCHEMA_DEPENDENCIES = 5 +SCHEMA_GENERATORS = 6 +SCHEMA_SEQUENCES = 6 +SCHEMA_TRIGGERS = 7 
+SCHEMA_PROCEDURES = 8 +SCHEMA_CONSTRAINTS = 9 +SCHEMA_COLLATIONS = 10 +SCHEMA_CHARACTER_SETS = 11 +SCHEMA_EXCEPTIONS = 12 +SCHEMA_ROLES = 13 +SCHEMA_FUNCTIONS = 14 +SCHEMA_FILES = 15 +SCHEMA_SHADOWS = 16 +SCHEMA_PRIVILEGES = 17 +SCHEMA_USERS = 18 +SCHEMA_PACKAGES = 19 +SCHEMA_BACKUP_HISTORY = 20 +SCHEMA_FILTERS = 21 + +# List of default sections (in order) for Schema.get_metadata_ddl() +SCRIPT_DEFAULT_ORDER = [SCRIPT_COLLATIONS, SCRIPT_CHARACTER_SETS, + SCRIPT_UDFS, SCRIPT_GENERATORS, + SCRIPT_EXCEPTIONS, SCRIPT_DOMAINS, + SCRIPT_PACKAGE_DEFS, + SCRIPT_FUNCTION_DEFS, SCRIPT_PROCEDURE_DEFS, + SCRIPT_TABLES, SCRIPT_PRIMARY_KEYS, + SCRIPT_UNIQUE_CONSTRAINTS, + SCRIPT_CHECK_CONSTRAINTS, + SCRIPT_FOREIGN_CONSTRAINTS, SCRIPT_INDICES, + SCRIPT_VIEWS, SCRIPT_PACKAGE_BODIES, + SCRIPT_PROCEDURE_BODIES, + SCRIPT_FUNCTION_BODIES, SCRIPT_TRIGGERS, + SCRIPT_GRANTS, SCRIPT_ROLES, SCRIPT_COMMENTS, + SCRIPT_SHADOWS, SCRIPT_SET_GENERATORS] + +# List of reserved Firebird words +RESERVED = ['ACTIVE', 'ADD', 'ADMIN', 'AFTER', 'ALL', 'ALTER', 'AND', + 'ANY', 'ARE', 'AS', 'ASC', 'ASCENDING', 'AT', 'AUTO', 'AUTODDL', 'AVG', + 'BASED', 'BASE_NAME', 'BEFORE', 'BEGIN', 'BETWEEN', 'BIGINT', 'BIT_LENGTH', + 'BLOB', 'BLOBEDIT', 'BOTH', 'BUFFER', 'BY', 'BOOLEAN', + 'CASE', 'CAST', 'CHAR', 'CHARACTER', 'CHAR_LENGTH', 'CHARACTER_LENGTH', + 'CHECK', 'CHECK_POINT_LENGTH', 'CLOSE', 'COALESCE', 'COLLATE', 'COLLATION', + 'COLUMN', 'COMMIT', 'COMMITTED', 'COMPILETIME', 'COMPUTED', 'CONDITIONAL', + 'CONNECT', 'CONSTRAINT', 'CONTAINING', 'CONTINUE', 'COUNT', 'CREATE', 'CROSS', + 'CSTRING', 'CURRENT', 'CURRENT_CONNECTION', 'CURRENT_DATE', 'CURRENT_ROLE', + 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_TRANSACTION', 'CURRENT_USER', + 'CORR', 'COVAR_POP', 'COVAR_SAMP', + 'DATABASE', 'DATE', 'DAY', 'DB_KEY', 'DEBUG', 'DEC', 'DECIMAL', 'DECLARE', 'DEFAULT', + 'DELETE', 'DELETING', 'DESC', 'DESCENDING', 'DESCRIBE', 'DISCONNECT', + 'DISPLAY', 'DISTINCT', 'DO', 'DOMAIN', 'DOUBLE', 'DROP', 'DETERMINISTIC', + 'ECHO', 'EDIT', 'ELSE', 'END', 'ENTRY_POINT', 'ESCAPE', 'EVENT', 'EXCEPTION', 'EXECUTE', + 'EXISTS', 'EXIT', 'EXTERN', 'EXTERNAL', 'EXTRACT', + 'FETCH', 'FILE', 'FILTER', 'FLOAT', 'FOR', 'FOREIGN', 'FOUND', 'FROM', 'FULL', + 'FUNCTION', 'FALSE', + 'GDSCODE', 'GENERATOR', 'GEN_ID', 'GOTO', 'GRANT', 'GROUP', 'GROUP_COMMIT_WAIT_TIME', 'GLOBAL', - 'HAVING','HEADING','HELP','HOUR', - 'IF','IMMEDIATE','IN','INACTIVE','INDEX','INDICATOR','INIT','INNER','INPUT', - 'INPUT_TYPE','INSERT','INSERTING','INT','INTEGER','INTO','IS','ISOLATION', + 'HAVING', 'HEADING', 'HELP', 'HOUR', + 'IF', 'IMMEDIATE', 'IN', 'INACTIVE', 'INDEX', 'INDICATOR', 'INIT', 'INNER', 'INPUT', + 'INPUT_TYPE', 'INSERT', 'INSERTING', 'INT', 'INTEGER', 'INTO', 'IS', 'ISOLATION', 'INSENSITIVE', 'JOIN', 'KEY', - 'LAST','LC_MESSAGES','LC_TYPE','LEADING','LEAVE','LEFT','LENGTH', - 'LEVEL','LIKE','LOCK','LOG_BUFFER_SIZE','LONG','LOWER', - 'MANUAL','MAX','MAXIMUM','MAXIMUM_SEGMENT','MAX_SEGMENT','MERGE','MESSAGE', - 'MIN','MINIMUM','MINUTE','MODULE_NAME','MONTH', - 'NAMES','NATIONAL','NATURAL','NCHAR','NO','NOAUTO','NOT','NULL','NULLIF', - 'NULLS','NUM_LOG_BUFFERS','NUMERIC', - 'OCTET_LENGTH','OF','ON','ONLY','OPEN','OPTION','OR','ORDER','OUTER','OUTPUT', - 'OUTPUT_TYPE','OVERFLOW','OFFSET','OVER', - 'PAGE','PAGELENGTH','PAGES','PAGE_SIZE','PARAMETER','PASSWORD','PERCENT', - 'PLAN','POSITION','POST_EVENT','PRECISION','PREPARE','PRIMARY','PRIVILEGES', - 'PROCEDURE','PUBLIC', + 'LAST', 'LC_MESSAGES', 'LC_TYPE', 'LEADING', 'LEAVE', 'LEFT', 'LENGTH', + 'LEVEL', 'LIKE', 'LOCK', 
'LOG_BUFFER_SIZE', 'LONG', 'LOWER', + 'MANUAL', 'MAX', 'MAXIMUM', 'MAXIMUM_SEGMENT', 'MAX_SEGMENT', 'MERGE', 'MESSAGE', + 'MIN', 'MINIMUM', 'MINUTE', 'MODULE_NAME', 'MONTH', + 'NAMES', 'NATIONAL', 'NATURAL', 'NCHAR', 'NO', 'NOAUTO', 'NOT', 'NULL', 'NULLIF', + 'NULLS', 'NUM_LOG_BUFFERS', 'NUMERIC', + 'OCTET_LENGTH', 'OF', 'ON', 'ONLY', 'OPEN', 'OPTION', 'OR', 'ORDER', 'OUTER', 'OUTPUT', + 'OUTPUT_TYPE', 'OVERFLOW', 'OFFSET', 'OVER', + 'PAGE', 'PAGELENGTH', 'PAGES', 'PAGE_SIZE', 'PARAMETER', 'PASSWORD', 'PERCENT', + 'PLAN', 'POSITION', 'POST_EVENT', 'PRECISION', 'PREPARE', 'PRIMARY', 'PRIVILEGES', + 'PROCEDURE', 'PUBLIC', 'QUIT', - 'RDB$DB_KEY','READ','REAL','RECORD_VERSION','RECREATE','REFERENCES','RELEASE', - 'RESERV','RESERVING','RETAIN','RETURN','RETURNING_VALUES','RETURNS','REVOKE', - 'RIGHT','ROLLBACK','ROW_COUNT','ROWS','RUNTIME','RECURSIVE','RDB$RECORD_VERSION', - 'REGR_AVGX','REGR_AVGY','REGR_COUNT','REGR_INTERCEPT','REGR_R2','REGR_SLOPE', - 'REGR_SXX','REGR_SXY','REGR_SYY','ROW', - 'SAVEPOINT','SCHEMA','SECOND','SELECT','SET','SHADOW','SHARED','SHELL', - 'SHOW','SIMILAR','SINGULAR','SIZE','SMALLINT','SNAPSHOT','SOME','SORT','SQL', - 'SQLCODE','SQLERROR','SQLWARNING','STABILITY','STARTING','STARTS','STATEMENT', - 'STATIC','STATISTICS','SUB_TYPE','SUM','SUSPEND','SENSITIVE','START','SCROLL', - 'SQLSTATE','STDDEV_POP','STDDEV_SAMP', - 'TABLE','TERM','TERMINATOR','THEN','TIES','TIME','TIMESTAMP','TO','TRAILING', - 'TRANSACTION','TRANSLATE','TRANSLATION','TRIGGER','TRIM','TRUE', - 'UNCOMMITTED','UNION','UNIQUE','UNKNOWN','UPDATE','UPDATING','UPPER','USER', + 'RDB$DB_KEY', 'READ', 'REAL', 'RECORD_VERSION', 'RECREATE', 'REFERENCES', 'RELEASE', + 'RESERV', 'RESERVING', 'RETAIN', 'RETURN', 'RETURNING_VALUES', 'RETURNS', 'REVOKE', + 'RIGHT', 'ROLLBACK', 'ROW_COUNT', 'ROWS', 'RUNTIME', 'RECURSIVE', 'RDB$RECORD_VERSION', + 'REGR_AVGX', 'REGR_AVGY', 'REGR_COUNT', 'REGR_INTERCEPT', 'REGR_R2', 'REGR_SLOPE', + 'REGR_SXX', 'REGR_SXY', 'REGR_SYY', 'ROW', + 'SAVEPOINT', 'SCHEMA', 'SECOND', 'SELECT', 'SET', 'SHADOW', 'SHARED', 'SHELL', + 'SHOW', 'SIMILAR', 'SINGULAR', 'SIZE', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SORT', 'SQL', + 'SQLCODE', 'SQLERROR', 'SQLWARNING', 'STABILITY', 'STARTING', 'STARTS', 'STATEMENT', + 'STATIC', 'STATISTICS', 'SUB_TYPE', 'SUM', 'SUSPEND', 'SENSITIVE', 'START', 'SCROLL', + 'SQLSTATE', 'STDDEV_POP', 'STDDEV_SAMP', + 'TABLE', 'TERM', 'TERMINATOR', 'THEN', 'TIES', 'TIME', 'TIMESTAMP', 'TO', 'TRAILING', + 'TRANSACTION', 'TRANSLATE', 'TRANSLATION', 'TRIGGER', 'TRIM', 'TRUE', + 'UNCOMMITTED', 'UNION', 'UNIQUE', 'UNKNOWN', 'UPDATE', 'UPDATING', 'UPPER', 'USER', 'USING', - 'VALUE','VALUES','VARCHAR','VARIABLE','VARYING','VERSION','VIEW','VAR_POP', + 'VALUE', 'VALUES', 'VARCHAR', 'VARIABLE', 'VARYING', 'VERSION', 'VIEW', 'VAR_POP', 'VAR_SAMP', - 'WAIT','WHEN','WHENEVER','WHERE','WHILE','WITH','WORK','WRITE', - 'YEAR', - ] -NON_RESERVED = ['ABS','ACCENT','ACOS','ALWAYS','ASCII_CHAR','ASCII_VAL','ASIN','ATAN','ATAN2', - 'AUTONOMOUS','ACTION','ABSOLUTE','ACOSH','ASINH','ATANH', - 'BIN_AND','BIN_OR','BIN_NOT','BIN_SHL','BIN_SHR','BIN_XOR', - 'BLOCK','BACKUP','BREAK','BODY', + 'WAIT', 'WHEN', 'WHENEVER', 'WHERE', 'WHILE', 'WITH', 'WORK', 'WRITE', + 'YEAR'] +# List of non-reserved Firebird words +NON_RESERVED = ['ABS', 'ACCENT', 'ACOS', 'ALWAYS', 'ASCII_CHAR', 'ASCII_VAL', 'ASIN', 'ATAN', + 'ATAN2', 'AUTONOMOUS', 'ACTION', 'ABSOLUTE', 'ACOSH', 'ASINH', 'ATANH', + 'BIN_AND', 'BIN_OR', 'BIN_NOT', 'BIN_SHL', 'BIN_SHR', 'BIN_XOR', + 'BLOCK', 'BACKUP', 'BREAK', 'BODY', #removed 'BASENAME', - 
'CALLER','CEIL','CEILING','CHAR_TO_UUID','CASCADE','COMMENT','COMMON', - 'COS','COSH','COT','CURSOR','CONTINUE', + 'CALLER', 'CEIL', 'CEILING', 'CHAR_TO_UUID', 'CASCADE', 'COMMENT', 'COMMON', + 'COS', 'COSH', 'COT', 'CURSOR', 'CONTINUE', #removed 'CACHE','CHECK_POINT_LEN', - 'DATEADD','DATEDIFF','DECODE','DIFFERENCE','DATA','DESCRIPTOR','DDL','DECRYPT', - 'DENSE_RANK', - 'EXP','ENCRYPT','ENGINE', - 'FIRSTNAME','FLOOR','FIRST','FREE_IT','FIRST_VALUE', - 'GEN_UUID','GENERATED','GRANTED', + 'DATEADD', 'DATEDIFF', 'DECODE', 'DIFFERENCE', 'DATA', 'DESCRIPTOR', 'DDL', + 'DECRYPT', 'DENSE_RANK', + 'EXP', 'ENCRYPT', 'ENGINE', + 'FIRSTNAME', 'FLOOR', 'FIRST', 'FREE_IT', 'FIRST_VALUE', + 'GEN_UUID', 'GENERATED', 'GRANTED', #removed 'GROUP_COMMIT_WAIT', 'HASH', - 'IGNORE','IIF','IDENTITY','INCREMENT', - 'LIMBO','LIST','LN','LOG','LOG10','LPAD','LASTNAME','LAST_VALUE','LAG','LEAD', - 'LINGER', + 'IGNORE', 'IIF', 'IDENTITY', 'INCREMENT', + 'LIMBO', 'LIST', 'LN', 'LOG', 'LOG10', 'LPAD', 'LASTNAME', 'LAST_VALUE', 'LAG', + 'LEAD', 'LINGER', #removed 'LOGFILE','LOG_BUF_SEZE', - 'MAPPING','MATCHED','MATCHING','MAXVALUE','MIDDLENAME','MILLISECOND', - 'MINVALUE','MOD', - 'NEXT','NAME','NTH_VALUE', + 'MAPPING', 'MATCHED', 'MATCHING', 'MAXVALUE', 'MIDDLENAME', 'MILLISECOND', + 'MINVALUE', 'MOD', + 'NEXT', 'NAME', 'NTH_VALUE', #removed 'NUM_LOG_BUFS', - 'OS_NAME','OVERLAY', - 'PI','PLACING','POWER','PROTECTED','PAD','PRESERVE','PACKAGE','PARTITION', - 'PLUGIN','PRIOR', - 'REPLACE','REQUESTS','RESTART','RETURNING','REVERSE','ROUND','RPAD','RAND', - 'RESTRICT','ROLE','RANK','RELATIVE','ROW_NUMBER', + 'OS_NAME', 'OVERLAY', + 'PI', 'PLACING', 'POWER', 'PROTECTED', 'PAD', 'PRESERVE', 'PACKAGE', 'PARTITION', + 'PLUGIN', 'PRIOR', + 'REPLACE', 'REQUESTS', 'RESTART', 'RETURNING', 'REVERSE', 'ROUND', 'RPAD', 'RAND', + 'RESTRICT', 'ROLE', 'RANK', 'RELATIVE', 'ROW_NUMBER', #removed 'RAW_PARTITIONS', - 'SEGMENT','SEQUENCE','SIGN','SIN','SINH','SOURCE','SPACE','SQLSTATE','SQRT', - 'SCALAR_ARRAY','SKIP','SUBSTRING','SERVERWIDE', - 'TIMEOUT','TRUNC','TWO_PHASE','TAN','TANH','TYPE','TEMPORARY','TAGS','TRUSTED', - 'UUID_TO_CHAR','UNDO','USAGE', - 'WEEK','WEEKDAY', - 'YEARDAY' - ] + 'SEGMENT', 'SEQUENCE', 'SIGN', 'SIN', 'SINH', 'SOURCE', 'SPACE', 'SQLSTATE', 'SQRT', + 'SCALAR_ARRAY', 'SKIP', 'SUBSTRING', 'SERVERWIDE', + 'TIMEOUT', 'TRUNC', 'TWO_PHASE', 'TAN', 'TANH', 'TYPE', 'TEMPORARY', 'TAGS', + 'TRUSTED', + 'UUID_TO_CHAR', 'UNDO', 'USAGE', + 'WEEK', 'WEEKDAY', + 'YEARDAY'] #--- Functions -def get_grants(privileges,grantors=None): +def get_grants(privileges, grantors=None): """Get list of minimal set of SQL GRANT statamenets necessary to grant specified privileges. @@ -268,15 +334,15 @@ :param list grantors: List of standard grantor names. Generates GRANTED BY clause for privileges granted by user that's not in list. 
""" - tp = {'S':'SELECT','I':'INSERT','U':'UPDATE','D':'DELETE','R':'REFERENCES'} + tp = {'S':'SELECT', 'I':'INSERT', 'U':'UPDATE', 'D':'DELETE', 'R':'REFERENCES'} def skey(item): - return (item.user_name,item.user_type,item.grantor_name, - item.subject_name,item.subject_type,item.has_grant(), - item.privilege in tp,item.privilege, str(item.field_name),) + return (item.user_name, item.user_type, item.grantor_name, + item.subject_name, item.subject_type, item.has_grant(), + item.privilege in tp, item.privilege, str(item.field_name),) def gkey(item): - return (item.user_name,item.user_type,item.grantor_name, - item.subject_name,item.subject_type,item.has_grant(), + return (item.user_name, item.user_type, item.grantor_name, + item.subject_name, item.subject_type, item.has_grant(), item.privilege in tp,) def gkey2(item): return item.privilege @@ -284,7 +350,7 @@ grants = [] p = list(privileges) p.sort(key=skey) - for k, g in groupby(p,gkey): + for k, g in groupby(p, gkey): g = list(g) item = g[0] if item.has_grant(): @@ -294,11 +360,11 @@ admin_option = '' uname = item.user_name user = item.user - if isinstance(user,Procedure): + if isinstance(user, Procedure): utype = 'PROCEDURE ' - elif isinstance(user,Trigger): + elif isinstance(user, Trigger): utype = 'TRIGGER ' - elif isinstance(user,View): + elif isinstance(user, View): utype = 'VIEW ' else: utype = '' @@ -308,7 +374,7 @@ else: granted_by = '' priv_list = [] - for k,items in groupby(g,gkey2): + for k, items in groupby(g, gkey2): items = list(items) item = items[0] if item.privilege in tp: @@ -324,26 +390,27 @@ privilege = '' if priv_list: privilege = ', '.join(priv_list) - privilege += ' ON ' - grants.append('GRANT %s%s TO %s%s%s%s' % (privilege,sname,utype, - uname,admin_option,granted_by)) + privilege += ' ON ' + grants.append('GRANT %s%s TO %s%s%s%s' % (privilege, sname, utype, uname, admin_option, + granted_by)) return grants -def isKeyword(ident): - "Returns True if `ident` is Firebird keyword." +def iskeyword(ident): + "Return True if `ident` is (any) Firebird keyword." return (ident in RESERVED) or (ident in NON_RESERVED) def escape_single_quotes(text): - return text.replace("'","''") + "Return `text` with any single quotes escaped (doubled)." + return text.replace("'", "''") -#--- Exceptions #--- Classes - -class Schema(object): +class Schema(Visitable): """This class represents database schema. 
""" #: option switch: Always quote db object names on output opt_always_quote = False + #: option switch: Keyword for generator/sequence + opt_generator_keyword = 'SEQUENCE' #: Datatype declaration methods for procedure parameters: key = numID, value = name enum_param_type_from = {PROCPAR_DATATYPE: 'DATATYPE', PROCPAR_DOMAIN: 'DOMAIN', @@ -399,11 +466,10 @@ self._con = None self._ic = None self.__internal = False + self.__clear() def __del__(self): if not self.closed: self._close() - def __get_closed(self): - return self._con is None def __fail_if_closed(self): if self.closed: raise fdb.ProgrammingError("Schema is not binded to connection.") @@ -416,83 +482,88 @@ :meth:`bind` and :meth:`close`.""" self.__internal = True self._con = weakref.proxy(self._con) - def __object_by_name(self,list,name): - if name is None: return None - for o in list: + def __object_by_name(self, _list, name): + if name is None: + return None + for o in _list: if o.name == name: return o return None - def __clear(self,data=None): + def __clear(self, data=None): if data: - data = data.lower() - if data not in ['tables','views','domains','indices','dependencies', - 'generators','sequences','triggers','procedures', - 'constraints','collations','character sets', - 'exceptions','roles','functions','files','shadows', - 'privileges','users','packages']: - raise fdb.ProgrammingError("Unknown metadata category '%s'" % data) - if (not data or data == 'tables'): - self.__tables = None - if (not data or data == 'views'): - self.__views = None - if (not data or data == 'domains'): - self.__domains = None - if (not data or data == 'indices'): - self.__indices = None - self.__constraint_indices = None - if (not data or data == 'dependencies'): - self.__dependencies = None - if (not data or data in ['generators','sequences']): - self.__generators = None - if (not data or data == 'triggers'): - self.__triggers = None - if (not data or data == 'procedures'): - self.__procedures = None - if (not data or data == 'constraints'): - self.__constraints = None - if (not data or data == 'collations'): - self.__collations = None - if (not data or data == 'character sets'): - self.__character_sets = None - if (not data or data == 'exceptions'): - self.__exceptions = None - if (not data or data == 'roles'): - self.__roles = None - if (not data or data == 'functions'): - self.__functions = None - if (not data or data == 'files'): - self.__files = None - if (not data or data == 'shadows'): - self.__shadows = None - if (not data or data == 'privileges'): - self.__privileges = None - if (not data or data == 'users'): - self.__users = None - if (not data or data == 'packages'): - self.__packages = None + if not isinstance(data, collections.Iterable): + data = (data, ) + else: + data = range(1, SCHEMA_FILTERS + 1) + for item in data: + if item == SCHEMA_TABLES: + self.__tables = None + elif item == SCHEMA_VIEWS: + self.__views = None + elif item == SCHEMA_DOMAINS: + self.__domains = None + elif item == SCHEMA_INDICES: + self.__indices = None + self.__constraint_indices = None + elif item == SCHEMA_DEPENDENCIES: + self.__dependencies = None + elif item == SCHEMA_GENERATORS: + self.__generators = None + elif item == SCHEMA_TRIGGERS: + self.__triggers = None + elif item == SCHEMA_PROCEDURES: + self.__procedures = None + elif item == SCHEMA_CONSTRAINTS: + self.__constraints = None + elif item == SCHEMA_COLLATIONS: + self.__collations = None + elif item == SCHEMA_CHARACTER_SETS: + self.__character_sets = None + elif item == SCHEMA_EXCEPTIONS: + 
self.__exceptions = None + elif item == SCHEMA_ROLES: + self.__roles = None + elif item == SCHEMA_FUNCTIONS: + self.__functions = None + elif item == SCHEMA_FILES: + self.__files = None + elif item == SCHEMA_SHADOWS: + self.__shadows = None + elif item == SCHEMA_PRIVILEGES: + self.__privileges = None + elif item == SCHEMA_USERS: + self.__users = None + elif item == SCHEMA_PACKAGES: + self.__packages = None + elif item == SCHEMA_BACKUP_HISTORY: + self.__backup_history = None + elif item == SCHEMA_FILTERS: + self.__filters = None + else: + raise fdb.ProgrammingError("Unknown metadata category '%d'" % item) #--- protected - def _select_row(self,cmd,params=None): + def _select_row(self, cmd, params=None): if params: - self._ic.execute(cmd,params) + self._ic.execute(cmd, params) else: self._ic.execute(cmd) return self._ic.fetchonemap() - def _select(self,cmd,params=None): + def _select(self, cmd, params=None): if params: - self._ic.execute(cmd,params) + self._ic.execute(cmd, params) else: self._ic.execute(cmd) return self._ic.itermap() - def _get_field_dimensions(self,field): - return [(r[0],r[1]) for r in + def _get_field_dimensions(self, field): + return [(r[0], r[1]) for r in self._ic.execute("""SELECT r.RDB$LOWER_BOUND, r.RDB$UPPER_BOUND FROM RDB$FIELD_DIMENSIONS r where r.RDB$FIELD_NAME = '%s' order by r.RDB$DIMENSION""" % field.name)] - def _get_item(self,name,itype,subname=None): + def _get_item(self, name, itype, subname=None): if itype == 0: # Relation return self.get_table(name) elif itype == 1: # View @@ -502,7 +573,7 @@ elif itype == 5: # Procedure return self.get_procedure(name) elif itype == 8: # User - result = self.__object_by_name(self._get_users(),name) + result = self.__object_by_name(self._get_users(), name) if not result: result = fdb.services.User(name) self.__users.append(result) @@ -519,84 +590,90 @@ return self.get_function(name) elif itype == 17: # Collation return self.get_collation(name) - elif itype in [17,18]: # Package + elif itype in [17, 18]: # Package return self.get_package(name) else: raise fdb.ProgrammingError('Unsupported subject type') #--- special attribute access methods - def _get_description(self): - return self.__description def _get_default_character_set(self): return self.get_character_set(self._default_charset_name) - def _get_owner_name(self): - return self.__owner - def _get_security_class(self): - return self.__security_class def _get_collations(self): if self.__collations is None: self.__fail_if_closed() self._ic.execute("select * from rdb$collations") - self.__collations = [Collation(self,row) for row in self._ic.itermap()] + self.__collations = ObjectList((Collation(self, row) for row in self._ic.itermap()), + Collation, 'item.name') + self.__collations.freeze() return self.__collations def _get_character_sets(self): if self.__character_sets is None: self.__fail_if_closed() self._ic.execute("select * from rdb$character_sets") - self.__character_sets = [CharacterSet(self,row) for row in self._ic.itermap()] + self.__character_sets = ObjectList((CharacterSet(self, row) for row in self._ic.itermap()), + CharacterSet, 'item.name') + self.__character_sets.freeze() return self.__character_sets def _get_exceptions(self): if self.__exceptions is None: self.__fail_if_closed() self._ic.execute("select * from rdb$exceptions") - self.__exceptions = [DatabaseException(self,row) for row in self._ic.itermap()] + self.__exceptions = ObjectList((DatabaseException(self, row) for row in self._ic.itermap()), + DatabaseException, 'item.name') + 
self.__exceptions.freeze() return self.__exceptions def _get_all_domains(self): if self.__domains is None: self.__fail_if_closed() - cols = ['RDB$FIELD_NAME','RDB$VALIDATION_SOURCE','RDB$COMPUTED_SOURCE', - 'RDB$DEFAULT_SOURCE','RDB$FIELD_LENGTH','RDB$FIELD_SCALE', - 'RDB$FIELD_TYPE','RDB$FIELD_SUB_TYPE','RDB$DESCRIPTION', - 'RDB$SYSTEM_FLAG','RDB$SEGMENT_LENGTH','RDB$EXTERNAL_LENGTH', - 'RDB$EXTERNAL_SCALE','RDB$EXTERNAL_TYPE','RDB$DIMENSIONS', - 'RDB$NULL_FLAG','RDB$CHARACTER_LENGTH','RDB$COLLATION_ID', - 'RDB$CHARACTER_SET_ID','RDB$FIELD_PRECISION'] + cols = ['RDB$FIELD_NAME', 'RDB$VALIDATION_SOURCE', 'RDB$COMPUTED_SOURCE', + 'RDB$DEFAULT_SOURCE', 'RDB$FIELD_LENGTH', 'RDB$FIELD_SCALE', + 'RDB$FIELD_TYPE', 'RDB$FIELD_SUB_TYPE', 'RDB$DESCRIPTION', + 'RDB$SYSTEM_FLAG', 'RDB$SEGMENT_LENGTH', 'RDB$EXTERNAL_LENGTH', + 'RDB$EXTERNAL_SCALE', 'RDB$EXTERNAL_TYPE', 'RDB$DIMENSIONS', + 'RDB$NULL_FLAG', 'RDB$CHARACTER_LENGTH', 'RDB$COLLATION_ID', + 'RDB$CHARACTER_SET_ID', 'RDB$FIELD_PRECISION'] if self._con.ods >= fdb.ODS_FB_30: cols.extend(['RDB$SECURITY_CLASS', 'RDB$OWNER_NAME']) self._ic.execute("""select %s from RDB$FIELDS""" % ','.join(cols)) - self.__domains = [Domain(self,row) for row in self._ic.itermap()] + self.__domains = ObjectList((Domain(self, row) for row in self._ic.itermap()), + Domain, 'item.name') + self.__domains.freeze() return self.__domains def _get_domains(self): - return [d for d in self._get_all_domains() if not d.issystemobject()] + return self._get_all_domains().filter(lambda item: not item.issystemobject()) def _get_sysdomains(self): - return [d for d in self._get_all_domains() if d.issystemobject()] + return self._get_all_domains().filter(lambda item: item.issystemobject()) def _get_all_tables(self): if self.__tables is None: self.__fail_if_closed() self._ic.execute("select * from rdb$relations where rdb$view_blr is null") - self.__tables = [Table(self,row) for row in self._ic.itermap()] + self.__tables = ObjectList((Table(self, row) for row in self._ic.itermap()), + Table, 'item.name') + self.__tables.freeze() return self.__tables def _get_tables(self): - return [t for t in self._get_all_tables() if not t.issystemobject()] + return self._get_all_tables().filter(lambda item: not item.issystemobject()) def _get_systables(self): - return [t for t in self._get_all_tables() if t.issystemobject()] + return self._get_all_tables().filter(lambda item: item.issystemobject()) def _get_all_views(self): if self.__views is None: self.__fail_if_closed() self._ic.execute("select * from rdb$relations where rdb$view_blr is not null") - self.__views = [View(self,row) for row in self._ic.itermap()] + self.__views = ObjectList((View(self, row) for row in self._ic.itermap()), + View, 'item.name') + self.__views.freeze() return self.__views def _get_views(self): - return [v for v in self._get_all_views() if not v.issystemobject()] + return self._get_all_views().filter(lambda item: not item.issystemobject()) def _get_sysviews(self): - return [v for v in self._get_all_views() if v.issystemobject()] + return self._get_all_views().filter(lambda item: item.issystemobject()) def _get_constraint_indices(self): if self.__constraint_indices is None: self.__fail_if_closed() self._ic.execute("""select RDB$INDEX_NAME, RDB$CONSTRAINT_NAME from RDB$RELATION_CONSTRAINTS where RDB$INDEX_NAME is not null""") - self.__constraint_indices = dict([(key.strip(),value.strip()) for key, value in self._ic]) + self.__constraint_indices = dict((key.strip(), value.strip()) for key, value in self._ic) return 
self.__constraint_indices def _get_all_indices(self): if self.__indices is None: @@ -609,27 +686,31 @@ RDB$INDEX_ID, RDB$UNIQUE_FLAG, RDB$DESCRIPTION, RDB$SEGMENT_COUNT, RDB$INDEX_INACTIVE, RDB$INDEX_TYPE, RDB$FOREIGN_KEY, RDB$SYSTEM_FLAG, RDB$EXPRESSION_SOURCE, RDB$STATISTICS from RDB$INDICES""") - self.__indices = [Index(self,row) for row in self._ic.itermap()] + self.__indices = ObjectList((Index(self, row) for row in self._ic.itermap()), + Index, 'item.name') + self.__indices.freeze() return self.__indices def _get_indices(self): - return [i for i in self._get_all_indices() if not i.issystemobject()] + return self._get_all_indices().filter(lambda item: not item.issystemobject()) def _get_sysindices(self): - return [i for i in self._get_all_indices() if i.issystemobject()] + return self._get_all_indices().filter(lambda item: item.issystemobject()) def _get_all_generators(self): if self.__generators is None: self.__fail_if_closed() cols = ['RDB$GENERATOR_NAME', 'RDB$GENERATOR_ID', 'RDB$DESCRIPTION', 'RDB$SYSTEM_FLAG'] if self._con.ods >= fdb.ODS_FB_30: - cols.extend(['RDB$SECURITY_CLASS','RDB$OWNER_NAME','RDB$INITIAL_VALUE', + cols.extend(['RDB$SECURITY_CLASS', 'RDB$OWNER_NAME', 'RDB$INITIAL_VALUE', 'RDB$GENERATOR_INCREMENT']) self._ic.execute("select %s from rdb$generators" % ','.join(cols)) - self.__generators = [Sequence(self,row) for row in self._ic.itermap()] + self.__generators = ObjectList((Sequence(self, row) for row in self._ic.itermap()), + Sequence, 'item.name') + self.__generators.freeze() return self.__generators def _get_generators(self): - return [g for g in self._get_all_generators() if not g.issystemobject()] + return self._get_all_generators().filter(lambda item: not item.issystemobject()) def _get_sysgenerators(self): - return [g for g in self._get_all_generators() if g.issystemobject()] + return self._get_all_generators().filter(lambda item: item.issystemobject()) def _get_all_triggers(self): if self.__triggers is None: self.__fail_if_closed() @@ -639,12 +720,14 @@ if self._con.ods >= fdb.ODS_FB_30: cols.extend(['RDB$VALID_BLR', 'RDB$ENGINE_NAME', 'RDB$ENTRYPOINT']) self._ic.execute("select %s from RDB$TRIGGERS" % ','.join(cols)) - self.__triggers = [Trigger(self,row) for row in self._ic.itermap()] + self.__triggers = ObjectList((Trigger(self, row) for row in self._ic.itermap()), + Trigger, 'item.name') + self.__triggers.freeze() return self.__triggers def _get_triggers(self): - return [g for g in self._get_all_triggers() if not g.issystemobject()] + return self._get_all_triggers().filter(lambda item: not item.issystemobject()) def _get_systriggers(self): - return [g for g in self._get_all_triggers() if g.issystemobject()] + return self._get_all_triggers().filter(lambda item: item.issystemobject()) def _get_all_procedures(self): if self.__procedures is None: self.__fail_if_closed() @@ -652,17 +735,19 @@ 'RDB$PROCEDURE_OUTPUTS', 'RDB$DESCRIPTION', 'RDB$PROCEDURE_SOURCE', 'RDB$SECURITY_CLASS', 'RDB$OWNER_NAME', 'RDB$SYSTEM_FLAG'] if self._con.ods >= fdb.ODS_FB_21: - cols.extend(['RDB$PROCEDURE_TYPE','RDB$VALID_BLR']) + cols.extend(['RDB$PROCEDURE_TYPE', 'RDB$VALID_BLR']) if self._con.ods >= fdb.ODS_FB_30: cols.extend(['RDB$ENGINE_NAME', 'RDB$ENTRYPOINT', 'RDB$PACKAGE_NAME', 'RDB$PRIVATE_FLAG']) self._ic.execute("select %s from rdb$procedures" % ','.join(cols)) - self.__procedures = [Procedure(self,row) for row in self._ic.itermap()] + self.__procedures = ObjectList((Procedure(self, row) for row in self._ic.itermap()), + Procedure, 'item.name') + 
self.__procedures.freeze() return self.__procedures def _get_procedures(self): - return [p for p in self._get_all_procedures() if not p.issystemobject()] + return self._get_all_procedures().filter(lambda item: not item.issystemobject()) def _get_sysprocedures(self): - return [p for p in self._get_all_procedures() if p.issystemobject()] + return self._get_all_procedures().filter(lambda item: item.issystemobject()) def _get_constraints(self): if self.__constraints is None: self.__fail_if_closed() @@ -670,53 +755,64 @@ # Constraint.issystemobject() that is called in Constraint.__init__() # will drop result from internal cursor and we'll not load all constraints. self._get_all_tables() - self._ic.execute("""select * from rdb$relation_constraints C + self._ic.execute("""select c.RDB$CONSTRAINT_NAME, +c.RDB$CONSTRAINT_TYPE, c.RDB$RELATION_NAME, c.RDB$DEFERRABLE, +c.RDB$INITIALLY_DEFERRED, c.RDB$INDEX_NAME, r.RDB$CONST_NAME_UQ, +r.RDB$MATCH_OPTION,r.RDB$UPDATE_RULE,r.RDB$DELETE_RULE, +k.RDB$TRIGGER_NAME from rdb$relation_constraints C left outer join rdb$ref_constraints R on C.rdb$constraint_name = R.rdb$constraint_name -left outer join rdb$check_constraints K on C.rdb$constraint_name = K.rdb$constraint_name""") - self.__constraints = [Constraint(self,row) for row in self._ic.itermap()] +left outer join rdb$check_constraints K on (C.rdb$constraint_name = K.rdb$constraint_name) +and (c.RDB$CONSTRAINT_TYPE in ('CHECK','NOT NULL'))""") + self.__constraints = ObjectList((Constraint(self, row) for row in self._ic.itermap()), + Constraint, 'item.name') # Check constrains need special care because they're doubled # (select above returns two records for them with different trigger names) - checks = [c for c in self.__constraints if c.ischeck()] - self.__constraints = [c for c in self.__constraints if not c.ischeck()] + checks = self.__constraints.extract(lambda item: item.ischeck()) dchecks = {} for check in checks: - dchecks.setdefault(check.name,list()).append(check) + dchecks.setdefault(check.name, list()).append(check) for checklist in dchecks.values(): names = [c._attributes['RDB$TRIGGER_NAME'] for c in checklist] check = checklist[0] check._attributes['RDB$TRIGGER_NAME'] = names self.__constraints.append(check) + self.__constraints.freeze() return self.__constraints def _get_roles(self): if self.__roles is None: self.__fail_if_closed() self._ic.execute("select * from rdb$roles") - self.__roles = [Role(self,row) for row in self._ic.itermap()] + self.__roles = ObjectList((Role(self, row) for row in self._ic.itermap()), + Role, 'item.name') + self.__roles.freeze() return self.__roles def _get_dependencies(self): if self.__dependencies is None: self.__fail_if_closed() self._ic.execute("select * from rdb$dependencies") - self.__dependencies = [Dependency(self,row) for row in self._ic.itermap()] + self.__dependencies = ObjectList((Dependency(self, row) for row in self._ic.itermap()), + Dependency) return self.__dependencies def _get_all_functions(self): if self.__functions is None: self.__fail_if_closed() - cols = ['RDB$FUNCTION_NAME','RDB$FUNCTION_TYPE','RDB$MODULE_NAME', - 'RDB$ENTRYPOINT','RDB$DESCRIPTION','RDB$RETURN_ARGUMENT', + cols = ['RDB$FUNCTION_NAME', 'RDB$FUNCTION_TYPE', 'RDB$MODULE_NAME', + 'RDB$ENTRYPOINT', 'RDB$DESCRIPTION', 'RDB$RETURN_ARGUMENT', 'RDB$SYSTEM_FLAG'] if self._con.ods >= fdb.ODS_FB_30: - cols.extend(['RDB$ENGINE_NAME','RDB$PACKAGE_NAME','RDB$PRIVATE_FLAG', - 'RDB$FUNCTION_SOURCE','RDB$FUNCTION_ID','RDB$VALID_BLR', - 'RDB$SECURITY_CLASS','RDB$OWNER_NAME','RDB$LEGACY_FLAG', + 
cols.extend(['RDB$ENGINE_NAME', 'RDB$PACKAGE_NAME', 'RDB$PRIVATE_FLAG', + 'RDB$FUNCTION_SOURCE', 'RDB$FUNCTION_ID', 'RDB$VALID_BLR', + 'RDB$SECURITY_CLASS', 'RDB$OWNER_NAME', 'RDB$LEGACY_FLAG', 'RDB$DETERMINISTIC_FLAG']) self._ic.execute("select %s from rdb$functions" % ','.join(cols)) - self.__functions = [Function(self,row) for row in self._ic.itermap()] + self.__functions = ObjectList((Function(self, row) for row in self._ic.itermap()), + Function, 'item.name') + self.__functions.freeze() return self.__functions def _get_functions(self): - return [p for p in self._get_all_functions() if not p.issystemobject()] + return self._get_all_functions().filter(lambda item: not item.issystemobject()) def _get_sysfunctions(self): - return [p for p in self._get_all_functions() if p.issystemobject()] + return self._get_all_functions().filter(lambda item: item.issystemobject()) def _get_files(self): if self.__files is None: self.__fail_if_closed() @@ -724,7 +820,9 @@ RDB$FILE_START, RDB$FILE_LENGTH from RDB$FILES where RDB$SHADOW_NUMBER = 0 order by RDB$FILE_SEQUENCE""") - self.__files = [DatabaseFile(self,row) for row in self._ic.itermap()] + self.__files = ObjectList((DatabaseFile(self, row) for row in self._ic.itermap()), + DatabaseFile, 'item.name') + self.__files.freeze() return self.__files def _get_shadows(self): if self.__shadows is None: @@ -733,7 +831,9 @@ from RDB$FILES where RDB$SHADOW_NUMBER > 0 AND RDB$FILE_SEQUENCE = 0 order by RDB$SHADOW_NUMBER""") - self.__shadows = [Shadow(self,row) for row in self._ic.itermap()] + self.__shadows = ObjectList((Shadow(self, row) for row in self._ic.itermap()), + Shadow, 'item.name') + self.__shadows.freeze() return self.__shadows def _get_privileges(self): if self.__privileges is None: @@ -741,13 +841,34 @@ self._ic.execute("""select RDB$USER, RDB$GRANTOR, RDB$PRIVILEGE, RDB$GRANT_OPTION, RDB$RELATION_NAME, RDB$FIELD_NAME, RDB$USER_TYPE, RDB$OBJECT_TYPE FROM RDB$USER_PRIVILEGES""") - self.__privileges = [Privilege(self,row) for row in self._ic.itermap()] + self.__privileges = ObjectList((Privilege(self, row) for row in self._ic.itermap()), + Privilege) return self.__privileges + def _get_backup_history(self): + if self.__backup_history is None: + self.__fail_if_closed() + self._ic.execute("""SELECT RDB$BACKUP_ID, RDB$TIMESTAMP, +RDB$BACKUP_LEVEL, RDB$GUID, RDB$SCN, RDB$FILE_NAME +FROM RDB$BACKUP_HISTORY""") + self.__backup_history = ObjectList((BackupHistory(self, row) for row in self._ic.itermap()), + BackupHistory, 'item.name') + self.__backup_history.freeze() + return self.__backup_history + def _get_filters(self): + if self.__filters is None: + self.__fail_if_closed() + self._ic.execute("""SELECT RDB$FUNCTION_NAME, RDB$DESCRIPTION, +RDB$MODULE_NAME, RDB$ENTRYPOINT, RDB$INPUT_SUB_TYPE, RDB$OUTPUT_SUB_TYPE, RDB$SYSTEM_FLAG +FROM RDB$FILTERS""") + self.__filters = ObjectList((Filter(self, row) for row in self._ic.itermap()), + Filter, 'item.name') + self.__filters.freeze() + return self.__filters def _get_users(self): if self.__users is None: self.__fail_if_closed() self._ic.execute("select distinct(RDB$USER) FROM RDB$USER_PRIVILEGES") - self.__users = [fdb.services.User(row[0].strip()) for row in self._ic] + self.__users = ObjectList((fdb.services.User(row[0].strip()) for row in self._ic), fdb.services.User, 'item.name') return self.__users def _get_packages(self): if self.__packages is None: @@ -757,82 +878,53 @@ RDB$PACKAGE_BODY_SOURCE, RDB$VALID_BODY_FLAG, RDB$SECURITY_CLASS, RDB$OWNER_NAME, RDB$SYSTEM_FLAG, RDB$DESCRIPTION FROM RDB$PACKAGES""") 
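# A recurring pattern in the hunks above and below: every lazy _get_*()
# loader now wraps its rows in an fdb.utils.ObjectList and freezes it, and
# the user/system splits use .filter() instead of list comprehensions. The
# sketch below is a minimal, assumption-level stand-in that illustrates only
# the behaviour these call sites rely on; it is not the real
# fdb.utils.ObjectList, which also supports key lookups driven by the
# 'item.name' key expression passed at each construction site.
class ObjectList(list):
    def __init__(self, items=(), _cls=None, key_expr=None):
        super(ObjectList, self).__init__(items)
        self.__frozen = False
    def freeze(self):
        # Assumed behaviour: a frozen list rejects further appends, so the
        # cached metadata cannot be mutated by accident.
        self.__frozen = True
    def append(self, item):
        if self.__frozen:
            raise TypeError('list is frozen')
        super(ObjectList, self).append(item)
    def filter(self, expr):
        # Returns a new, unfrozen ObjectList of items matching `expr`.
        return ObjectList(item for item in self if expr(item))
    def extract(self, expr):
        # Removes and returns matching items, as _get_constraints() expects
        # when it pulls the doubled CHECK constraints out for deduplication.
        found = ObjectList(item for item in self if expr(item))
        self[:] = [item for item in self if not expr(item)]
        return found

# e.g. the split performed by _get_tables()/_get_systables() above:
#   user_tables = all_tables.filter(lambda item: not item.issystemobject())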
- self.__packages = [Package(self,row) for row in self._ic.itermap()] + self.__packages = ObjectList((Package(self, row) for row in self._ic.itermap()), + Package, 'item.name') else: - self.__packages = [] + self.__packages = ObjectList(_cls=Package, key_expr='item.name') + self.__packages.freeze() return self.__packages - def _get_linger(self): - return self.__linger #--- Properties #: True if link to :class:`~fdb.Connection` is closed. - closed = property(__get_closed) - description = LateBindingProperty(_get_description,None,None, - "Database description or None if it doesn't have a description.") - owner_name = LateBindingProperty(_get_owner_name,None,None,"Database owner name.") - default_character_set = LateBindingProperty(_get_default_character_set,None,None, - "Default :class:`CharacterSet` for database") - security_class = LateBindingProperty(_get_security_class,None,None, - "Can refer to the security class applied as databasewide access control limits.") - collations = LateBindingProperty(_get_collations,None,None, - "List of all collations in database.\nItems are :class:`Collation` objects.") - character_sets = LateBindingProperty(_get_character_sets,None,None, - "List of all character sets in database.\nItems are :class:`CharacterSet` objects.") - exceptions = LateBindingProperty(_get_exceptions,None,None, - "List of all exceptions in database.\nItems are :class:`DatabaseException` objects.") - generators = LateBindingProperty(_get_generators,None,None, - "List of all user generators in database.\nItems are :class:`Sequence` objects.") - sysgenerators = LateBindingProperty(_get_sysgenerators,None,None, - "List of all system generators in database.\nItems are :class:`Sequence` objects.") - sequences = LateBindingProperty(_get_generators,None,None, - "List of all user generators in database.\nItems are :class:`Sequence` objects.") - syssequences = LateBindingProperty(_get_sysgenerators,None,None, - "List of all system generators in database.\nItems are :class:`Sequence` objects.") - domains = LateBindingProperty(_get_domains,None,None, - "List of all user domains in database.\nItems are :class:`Domain` objects.") - sysdomains = LateBindingProperty(_get_sysdomains,None,None, - "List of all system domains in database.\nItems are :class:`Domain` objects.") - indices = LateBindingProperty(_get_indices,None,None, - "List of all user indices in database.\nItems are :class:`Index` objects.") - sysindices = LateBindingProperty(_get_sysindices,None,None, - "List of all system indices in database.\nItems are :class:`Index` objects.") - tables = LateBindingProperty(_get_tables,None,None, - "List of all user tables in database.\nItems are :class:`Table` objects.") - systables = LateBindingProperty(_get_systables,None,None, - "List of all system tables in database.\nItems are :class:`Table` objects.") - views = LateBindingProperty(_get_views,None,None, - "List of all user views in database.\nItems are :class:`View` objects.") - sysviews = LateBindingProperty(_get_sysviews,None,None, - "List of all system views in database.\nItems are :class:`View` objects.") - triggers = LateBindingProperty(_get_triggers,None,None, - "List of all user triggers in database.\nItems are :class:`Trigger` objects.") - systriggers = LateBindingProperty(_get_systriggers,None,None, - "List of all system triggers in database.\nItems are :class:`Trigger` objects.") - procedures = LateBindingProperty(_get_procedures,None,None, - "List of all user procedures in database.\nItems are :class:`Procedure` objects.") - 
sysprocedures = LateBindingProperty(_get_sysprocedures,None,None, - "List of all system procedures in database.\nItems are :class:`Procedure` objects.") - constraints = LateBindingProperty(_get_constraints,None,None, - "List of all constraints in database.\nItems are :class:`Constraint` objects.") - roles = LateBindingProperty(_get_roles,None,None, - "List of all roles in database.\nItems are :class:`Role` objects.") - dependencies = LateBindingProperty(_get_dependencies,None,None, - "List of all dependencies in database.\nItems are :class:`Dependency` objects.") - functions = LateBindingProperty(_get_functions,None,None, - "List of all user functions defined in database.\nItems are :class:`Function` objects.") - sysfunctions = LateBindingProperty(_get_sysfunctions,None,None, - "List of all system functions defined in database.\nItems are :class:`Function` objects.") - files = LateBindingProperty(_get_files,None,None, - "List of all extension files defined for database.\nItems are :class:`DatabaseFile` objects.") - shadows = LateBindingProperty(_get_shadows,None,None, - "List of all shadows defined for database.\nItems are :class:`Shadow` objects.") - privileges = LateBindingProperty(_get_privileges,None,None, - "List of all privileges defined for database.\nItems are :class:`Privilege` objects.") + closed = property(lambda self: self._con is None) + description = property(lambda self: self.__description, doc="Database description or None if it doesn't have a description.") + owner_name = property(lambda self: self.__owner, doc="Database owner name.") + default_character_set = LateBindingProperty(_get_default_character_set, doc="Default :class:`CharacterSet` for database") + security_class = property(lambda self: self.__security_class, doc="Can refer to the security class applied as databasewide access control limits.") + collations = LateBindingProperty(_get_collations, doc=":class:`~fdb.utils.ObjectList` of all collations in database.\nItems are :class:`Collation` objects.") + character_sets = LateBindingProperty(_get_character_sets, doc=":class:`~fdb.utils.ObjectList` of all character sets in database.\nItems are :class:`CharacterSet` objects.") + exceptions = LateBindingProperty(_get_exceptions, doc=":class:`~fdb.utils.ObjectList` of all exceptions in database.\nItems are :class:`DatabaseException` objects.") + generators = LateBindingProperty(_get_generators, doc=":class:`~fdb.utils.ObjectList` of all user generators in database.\nItems are :class:`Sequence` objects.") + sysgenerators = LateBindingProperty(_get_sysgenerators, doc=":class:`~fdb.utils.ObjectList` of all system generators in database.\nItems are :class:`Sequence` objects.") + sequences = LateBindingProperty(_get_generators, doc=":class:`~fdb.utils.ObjectList` of all user generators in database.\nItems are :class:`Sequence` objects.") + syssequences = LateBindingProperty(_get_sysgenerators, doc=":class:`~fdb.utils.ObjectList` of all system generators in database.\nItems are :class:`Sequence` objects.") + domains = LateBindingProperty(_get_domains, doc=":class:`~fdb.utils.ObjectList` of all user domains in database.\nItems are :class:`Domain` objects.") + sysdomains = LateBindingProperty(_get_sysdomains, doc=":class:`~fdb.utils.ObjectList` of all system domains in database.\nItems are :class:`Domain` objects.") + indices = LateBindingProperty(_get_indices, doc=":class:`~fdb.utils.ObjectList` of all user indices in database.\nItems are :class:`Index` objects.") + sysindices = LateBindingProperty(_get_sysindices, 
doc=":class:`~fdb.utils.ObjectList` of all system indices in database.\nItems are :class:`Index` objects.") + tables = LateBindingProperty(_get_tables, doc=":class:`~fdb.utils.ObjectList` of all user tables in database.\nItems are :class:`Table` objects.") + systables = LateBindingProperty(_get_systables, doc=":class:`~fdb.utils.ObjectList` of all system tables in database.\nItems are :class:`Table` objects.") + views = LateBindingProperty(_get_views, doc=":class:`~fdb.utils.ObjectList` of all user views in database.\nItems are :class:`View` objects.") + sysviews = LateBindingProperty(_get_sysviews, doc=":class:`~fdb.utils.ObjectList` of all system views in database.\nItems are :class:`View` objects.") + triggers = LateBindingProperty(_get_triggers, doc=":class:`~fdb.utils.ObjectList` of all user triggers in database.\nItems are :class:`Trigger` objects.") + systriggers = LateBindingProperty(_get_systriggers, doc=":class:`~fdb.utils.ObjectList` of all system triggers in database.\nItems are :class:`Trigger` objects.") + procedures = LateBindingProperty(_get_procedures, doc=":class:`~fdb.utils.ObjectList` of all user procedures in database.\nItems are :class:`Procedure` objects.") + sysprocedures = LateBindingProperty(_get_sysprocedures, doc=":class:`~fdb.utils.ObjectList` of all system procedures in database.\nItems are :class:`Procedure` objects.") + constraints = LateBindingProperty(_get_constraints, doc=":class:`~fdb.utils.ObjectList` of all constraints in database.\nItems are :class:`Constraint` objects.") + roles = LateBindingProperty(_get_roles, doc=":class:`~fdb.utils.ObjectList` of all roles in database.\nItems are :class:`Role` objects.") + dependencies = LateBindingProperty(_get_dependencies, doc=":class:`~fdb.utils.ObjectList` of all dependencies in database.\nItems are :class:`Dependency` objects.") + functions = LateBindingProperty(_get_functions, doc=":class:`~fdb.utils.ObjectList` of all user functions defined in database.\nItems are :class:`Function` objects.") + sysfunctions = LateBindingProperty(_get_sysfunctions, doc=":class:`~fdb.utils.ObjectList` of all system functions defined in database.\nItems are :class:`Function` objects.") + files = LateBindingProperty(_get_files, doc=":class:`~fdb.utils.ObjectList` of all extension files defined for database.\nItems are :class:`DatabaseFile` objects.") + shadows = LateBindingProperty(_get_shadows, doc=":class:`~fdb.utils.ObjectList` of all shadows defined for database.\nItems are :class:`Shadow` objects.") + privileges = LateBindingProperty(_get_privileges, doc=":class:`~fdb.utils.ObjectList` of all privileges defined for database.\nItems are :class:`Privilege` objects.") + backup_history = LateBindingProperty(_get_backup_history, doc=":class:`~fdb.utils.ObjectList` of all nbackup hisotry records.\nItems are :class:`BackupHistory` objects.") + filters = LateBindingProperty(_get_filters, doc=":class:`~fdb.utils.ObjectList` of all user-defined BLOB filters.\nItems are :class:`Filter` objects.") # FB 3 - packages = LateBindingProperty(_get_packages,None,None, - "List of all packages defined for database.\nItems are :class:`Package` objects.") - linger = LateBindingProperty(_get_linger,None,None,"Database linger value.") + packages = LateBindingProperty(_get_packages, doc=":class:`~fdb.utils.ObjectList` of all packages defined for database.\nItems are :class:`Package` objects.") + linger = property(lambda self: self.__linger, doc="Database linger value.") #--- Public @@ -841,7 +933,7 @@ :param connection: :class:`~fdb.Connection` 
@@ -841,7 +933,7 @@
         :param connection: :class:`~fdb.Connection` instance.
-        :raises ProgrammingError: If Schema object was set as internal (via
+        :raises `~fdb.ProgrammingError`: If Schema object was set as internal (via
             :meth:`_set_as_internal`).
         """
         if self.__internal:
@@ -859,18 +951,19 @@
         self.__linger = row.get('RDB$LINGER')
         self._default_charset_name = row['RDB$CHARACTER_SET_NAME'].strip()
         self.__security_class = row['RDB$SECURITY_CLASS']
-        if self.__security_class: self.__security_class = self.__security_class.strip()
+        if self.__security_class:
+            self.__security_class = self.__security_class.strip()
         self._ic.execute("select RDB$OWNER_NAME from RDB$RELATIONS where RDB$RELATION_NAME = 'RDB$DATABASE'")
         self.__owner = self._ic.fetchone()[0].strip()
         # Load enumerate types defined in RDB$TYPES table
         enum_select = 'select RDB$TYPE, RDB$TYPE_NAME from RDB$TYPES where RDB$FIELD_NAME = ?'
         def enum_dict(enum_type):
-            return dict((key,value.strip()) for key, value
-                        in self._ic.execute(enum_select,(enum_type,)))
+            return dict((key, value.strip()) for key, value
+                        in self._ic.execute(enum_select, (enum_type,)))
         # Object types
         self.enum_object_types = enum_dict('RDB$OBJECT_TYPE')
         # Object type codes
-        self.enum_object_type_codes = dict(((value,key) for key,value
+        self.enum_object_type_codes = dict(((value, key) for key, value
                                             in self.enum_object_types.items()))
         # Character set names
         self.enum_character_set_names = enum_dict('RDB$CHARACTER_SET_NAME')
@@ -919,55 +1012,28 @@
     def close(self):
         """Sever link to :class:`~fdb.Connection`.

-        :raises ProgrammingError: If Schema object was set as internal (via
+        :raises `~fdb.ProgrammingError`: If Schema object was set as internal (via
             :meth:`_set_as_internal`).
         """
         if self.__internal:
            raise fdb.ProgrammingError("Call to 'close' not allowed for embedded Schema.")
         self._close()
         self.__clear()
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitSchema(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitSchema(self)
     #--- Basic Database manipulation routines
     def clear(self):
         "Drop all cached metadata objects."
         self.__clear()
-    def reload(self,data=None):
+    def reload(self, data=None):
         """Drop all or specified category of cached metadata objects, so they're
         reloaded from database on next reference.

-        :param string data: `None` or name of metadata category.
+        :param string data: `None`, metadata category code or iterable with category codes.

+        .. note:: Category codes are defined by `SCHEMA_*` globals.

-        Recognized (case insensitive) names of metadata categories:
-
-        - tables
-        - views
-        - domain
-        - indices
-        - dependencies
-        - generators
-        - sequences
-        - triggers
-        - procedures
-        - constraints
-        - collations
-        - character sets
-        - exceptions
-        - roles
-        - functions
-        - files
-        - shadows
-        - privileges
-        - users
-        - packages
-
-        :raises ProgrammingError: For undefined metadata category.
+        :raises fdb.ProgrammingError: For undefined metadata category.

         .. note:: Also commits query transaction.
         """
@@ -975,116 +1041,271 @@
         if not self.closed:
             self._ic.transaction.commit()
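# Sketch of the reworked reload() contract: it now takes a SCHEMA_* category
# code (or an iterable of codes) instead of a category name.  SCHEMA_TABLES
# is assumed here to be one of the module's SCHEMA_* globals; db_schema is
# the bound Schema instance from the earlier sketch.
from fdb.schema import SCHEMA_TABLES

db_schema.reload()               # drop every cached metadata category
db_schema.reload(SCHEMA_TABLES)  # drop only the cached tables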
+""" + def order_by_dependency(items, get_dependencies): + ordered = [] + wlist = list(items) + while len(wlist) > 0: + item = wlist.pop(0) + add = True + for dep in get_dependencies(item): + if isinstance(dep.depended_on, View) and dep.depended_on not in ordered: + wlist.append(item) + add = False + break + if add: + ordered.append(item) + return ordered + def view_dependencies(item): + return [x for x in item.get_dependencies() + if x.depended_on_type == 1] + # + script = [] + for section in sections: + if section == SCRIPT_COLLATIONS: + for collation in self.collations: + if not collation.issystemobject(): + script.append(collation.get_sql_for('create')) + elif section == SCRIPT_CHARACTER_SETS: + for charset in self.character_sets: + if charset.name != charset.default_collate.name: + script.append(charset.get_sql_for('alter', + collation=charset.default_collate.name)) + elif section == SCRIPT_UDFS: + for udf in self.functions: + if udf.isexternal(): + script.append(udf.get_sql_for('declare')) + elif section == SCRIPT_GENERATORS: + for generator in self.generators: + script.append(generator.get_sql_for('create')) + elif section == SCRIPT_EXCEPTIONS: + for e in self.exceptions: + script.append(e.get_sql_for('create')) + elif section == SCRIPT_DOMAINS: + for domain in self.domains: + script.append(domain.get_sql_for('create')) + elif section == SCRIPT_PACKAGE_DEFS: + for package in self.packages: + script.append(package.get_sql_for('create')) + elif section == SCRIPT_FUNCTION_DEFS: + for func in (x for x in self.functions if + not x.isexternal() and + not x.ispackaged()): + script.append(func.get_sql_for('create', no_code=True)) + elif section == SCRIPT_PROCEDURE_DEFS: + for proc in (x for x in self.procedures if not x.ispackaged()): + script.append(proc.get_sql_for('create', no_code=True)) + elif section == SCRIPT_TABLES: + for table in self.tables: + script.append(table.get_sql_for('create', no_pk=True, no_unique=True)) + elif section == SCRIPT_PRIMARY_KEYS: + for constraint in (x for x in self.constraints if x.ispkey()): + script.append(constraint.get_sql_for('create')) + elif section == SCRIPT_UNIQUE_CONSTRAINTS: + for table in self.tables: + for constraint in (x for x in table.constraints if x.isunique()): + script.append(constraint.get_sql_for('create')) + elif section == SCRIPT_CHECK_CONSTRAINTS: + for table in self.tables: + for constraint in (x for x in table.constraints if x.ischeck()): + script.append(constraint.get_sql_for('create')) + elif section == SCRIPT_FOREIGN_CONSTRAINTS: + for table in self.tables: + for constraint in (x for x in table.constraints if x.isfkey()): + script.append(constraint.get_sql_for('create')) + elif section == SCRIPT_INDICES: + for table in self.tables: + for index in (x for x in table.indices + if not x.isenforcer()): + script.append(index.get_sql_for('create')) + elif section == SCRIPT_VIEWS: + for view in order_by_dependency(self.views, view_dependencies): + script.append(view.get_sql_for('create')) + elif section == SCRIPT_PACKAGE_BODIES: + for package in self.packages: + script.append(package.get_sql_for('create', body=True)) + elif section == SCRIPT_PROCEDURE_BODIES: + for proc in (x for x in self.procedures if not x.ispackaged()): + script.append('ALTER' + proc.get_sql_for('create')[6:]) + elif section == SCRIPT_FUNCTION_BODIES: + for func in (x for x in self.functions if + not x.isexternal() and + not x.ispackaged()): + script.append('ALTER' + func.get_sql_for('create')[6:]) + elif section == SCRIPT_TRIGGERS: + for trigger in self.triggers: + 
+                    script.append(trigger.get_sql_for('create'))
+            elif section == SCRIPT_ROLES:
+                for role in (x for x in self.roles if not x.issystemobject()):
+                    script.append(role.get_sql_for('create'))
+            elif section == SCRIPT_GRANTS:
+                for priv in (x for x in self.privileges
+                             if x.user_name != 'SYSDBA'
+                             and not x.subject.issystemobject()):
+                    script.append(priv.get_sql_for('grant'))
+            elif section == SCRIPT_COMMENTS:
+                for objects in [self.character_sets, self.collations,
+                                self.exceptions, self.domains,
+                                self.generators, self.tables,
+                                self.indices, self.views,
+                                self.triggers, self.procedures,
+                                self.functions, self.roles]:
+                    for obj in objects:
+                        if obj.description is not None:
+                            script.append(obj.get_sql_for('comment'))
+                        if isinstance(obj, (Table, View)):
+                            for col in obj.columns:
+                                if col.description is not None:
+                                    script.append(col.get_sql_for('comment'))
+                        elif isinstance(obj, Procedure):
+                            for par in obj.input_params:
+                                if par.description is not None:
+                                    script.append(par.get_sql_for('comment'))
+                            for par in obj.output_params:
+                                if par.description is not None:
+                                    script.append(par.get_sql_for('comment'))
+            elif section == SCRIPT_SHADOWS:
+                for shadow in self.shadows:
+                    script.append(shadow.get_sql_for('create'))
+            elif section == SCRIPT_INDEX_DEACTIVATIONS:
+                for index in self.indices:
+                    script.append(index.get_sql_for('deactivate'))
+            elif section == SCRIPT_INDEX_ACTIVATIONS:
+                for index in self.indices:
+                    script.append(index.get_sql_for('activate'))
+            elif section == SCRIPT_SET_GENERATORS:
+                for generator in self.generators:
+                    script.append(generator.get_sql_for('alter', value=generator.value))
+            elif section == SCRIPT_TRIGGER_DEACTIVATIONS:
+                for trigger in self.triggers:
+                    script.append(trigger.get_sql_for('alter', active=False))
+            elif section == SCRIPT_TRIGGER_ACTIVATIONS:
+                for trigger in self.triggers:
+                    script.append(trigger.get_sql_for('alter', active=True))
+            else:
+                raise ValueError("Unknown section code %s" % section)
+        return script
     def ismultifile(self):
         "Returns true if database has multiple files."
         return len(self.files) > 0
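# Sketch: extracting a partial DDL script with the new get_metadata_ddl().
# The SCRIPT_* constants named here appear in the code above; the section
# order in the output follows the order of the list that is passed in.
from fdb.schema import SCRIPT_DOMAINS, SCRIPT_TABLES, SCRIPT_PRIMARY_KEYS

ddl = db_schema.get_metadata_ddl(sections=[SCRIPT_DOMAINS, SCRIPT_TABLES,
                                           SCRIPT_PRIMARY_KEYS])
print(';\n'.join(ddl) + ';')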
""" - return self.__object_by_name(self._get_all_indices(),name) - def get_domain(self,name): + return self._get_all_indices().get(name) + def get_domain(self, name): """Get :class:`Domain` by name. :param string name: Domain name. :returns: :class:`Domain` with specified name or `None`. """ - return self.__object_by_name(self._get_all_domains(),name) - def get_table(self,name): + return self._get_all_domains().get(name) + def get_table(self, name): """Get :class:`Table` by name. :param string name: Table name. :returns: :class:`Table` with specified name or `None`. """ - return self.__object_by_name(self._get_all_tables(),name) - def get_view(self,name): + return self._get_all_tables().get(name) + def get_view(self, name): """Get :class:`View` by name. :param string name: View name. :returns: :class:`View` with specified name or `None`. """ - return self.__object_by_name(self._get_all_views(),name) - def get_trigger(self,name): + return self._get_all_views().get(name) + def get_trigger(self, name): """Get :class:`Trigger` by name. :param string name: Trigger name. :returns: :class:`Trigger` with specified name or `None`. """ - return self.__object_by_name(self._get_all_triggers(),name) - def get_procedure(self,name): + return self._get_all_triggers().get(name) + def get_procedure(self, name): """Get :class:`Procedure` by name. :param string name: Procedure name. :returns: :class:`Procedure` with specified name or `None`. """ - return self.__object_by_name(self._get_all_procedures(),name) - def get_constraint(self,name): + return self._get_all_procedures().get(name) + def get_constraint(self, name): """Get :class:`Constraint` by name. :param string name: Constraint name. :returns: :class:`Constraint` with specified name or `None`. """ - return self.__object_by_name(self._get_constraints(),name) - def get_role(self,name): + return self.constraints.get(name) + def get_role(self, name): """Get :class:`Role` by name. :param string name: Role name. :returns: :class:`Role` with specified name or `None`. """ - return self.__object_by_name(self._get_roles(),name) - def get_function(self,name): + return self.roles.get(name) + def get_function(self, name): """Get :class:`Function` by name. :param string name: Function name. :returns: :class:`Function` with specified name or `None`. """ - return self.__object_by_name(self._get_all_functions(),name) - def get_collation_by_id(self,charset_id,collation_id): + return self._get_all_functions().get(name) + def get_collation_by_id(self, charset_id, collation_id): """Get :class:`Collation` by ID. :param integer charset_id: Character set ID. @@ -1092,24 +1313,24 @@ :returns: :class:`Collation` with specified ID or `None`. """ - for collation in self._get_collations(): + for collation in self.collations: if (collation._attributes['RDB$CHARACTER_SET_ID'] == charset_id) and (collation.id == collation_id): return collation else: return None - def get_character_set_by_id(self,id): + def get_character_set_by_id(self, id): """Get :class:`CharacterSet` by ID. :param integer name: CharacterSet ID. :returns: :class:`CharacterSet` with specified ID or `None`. """ - for charset in self._get_character_sets(): + for charset in self.character_sets: if charset.id == id: return charset else: return None - def get_privileges_of(self,user, user_type=None): + def get_privileges_of(self, user, user_type=None): """Get list of all privileges granted to user/database object. :param user: User name or instance of class that represents possible user. 
@@ -1119,54 +1340,52 @@
             Numeric code for user type, see :attr:`Schema.enum_object_types`.
         :returns: List of :class:`Privilege` objects.
-        :raises ProgrammingError: For unknown `user_type` code.
+        :raises `~fdb.ProgrammingError`: For unknown `user_type` code.
         """
-        if isinstance(user,(fdb.StringType,fdb.UnicodeType)):
+        if isinstance(user, (fdb.StringType, fdb.UnicodeType)):
             if (user_type is None) or (user_type not in self.enum_object_types):
                 raise fdb.ProgrammingError("Unknown user_type code.")
             else:
                 uname = user
                 utype = [user_type]
-        elif isinstance(user,(Table,View,Procedure,Trigger,Role)):
+        elif isinstance(user, (Table, View, Procedure, Trigger, Role)):
             uname = user.name
             utype = user._type_code
-        elif isinstance(user,fdb.services.User):
+        elif isinstance(user, fdb.services.User):
             uname = user.name
             utype = [8]
         return [p for p in self.privileges
-                if ((p.user_name == uname) and (p.user_type in utype))]
-    def get_package(self,name):
+                if (p.user_name == uname) and (p.user_type in utype)]
+    def get_package(self, name):
         """Get :class:`Package` by name.
         :param string name: Package name.
         :returns: :class:`Package` with specified name or `None`.
         """
-        return self.__object_by_name(self._get_packages(),name)
+        return self.packages.get(name)
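# Sketch: get_privileges_of() accepts either a schema object (Table, View,
# Procedure, Trigger, Role) or a plain user name, in which case a user_type
# code from Schema.enum_object_types is mandatory.  'MANAGER' is illustrative.
role = db_schema.get_role('MANAGER')
if role is not None:
    for priv in db_schema.get_privileges_of(role):
        print(priv.get_sql_for('grant'))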
-class BaseSchemaItem(object):
-    """Base class for all database schema objects.
-    """
+class BaseSchemaItem(Visitable):
+    """Base class for all database schema objects."""
     #: Weak reference to parent :class:`Schema` instance.
     schema = None
-    def __init__(self,schema,attributes):
-        self.schema = schema if type(schema) == weakref.ProxyType else weakref.proxy(schema)
+    def __init__(self, schema, attributes):
+        self.schema = schema if isinstance(schema, weakref.ProxyType) else weakref.proxy(schema)
         self._type_code = []
         self._attributes = dict(attributes)
         self._actions = []
     #--- protected
-
-    def _strip_attribute(self,attr):
+    def _strip_attribute(self, attr):
         if self._attributes.get(attr):
             self._attributes[attr] = self._attributes[attr].strip()
-    def _check_params(self,params,param_names):
+    def _check_params(self, params, param_names):
         p = set(params.keys())
         n = set(param_names)
         if not p.issubset(n):
             raise fdb.ProgrammingError("Unsupported parameter(s) '%s'" % ','.join(p.difference(n)))
-    def _needs_quoting(self,ident):
+    def _needs_quoting(self, ident):
         if not ident:
             return False
         if self.schema.opt_always_quote:
@@ -1176,8 +1395,8 @@
         for char in ident:
             if char not in string.ascii_uppercase + string.digits + '$_':
                 return True
-        return isKeyword(ident)
-    def _get_quoted_ident(self,ident):
+        return iskeyword(ident)
+    def _get_quoted_ident(self, ident):
         if self._needs_quoting(ident):
             return '"%s"' % ident
         else:
@@ -1188,55 +1407,40 @@
         return self._attributes.get('RDB$DESCRIPTION')
     def _get_actions(self):
         return self._actions
-    def _get_recreate_sql(self,**params):
+    def _get_recreate_sql(self, **params):
         return 'RE'+self._get_create_sql(**params)
-    def _get_create_or_alter_sql(self,**params):
+    def _get_create_or_alter_sql(self, **params):
         return 'CREATE OR ALTER' + self._get_create_sql(**params)[6:]
     #--- properties
-
-    name = LateBindingProperty(_get_name,None,None,
-        "Database object name or None if object doesn't have a name.")
-    description = LateBindingProperty(_get_description,None,None,
-        "Database object description or None if object doesn't have a description.")
-    actions = LateBindingProperty(_get_actions,None,None,
-        "List of supported SQL operations on metadata object instance.")
-
+    name = LateBindingProperty(_get_name, doc="Database object name or None if object doesn't have a name.")
+    description = LateBindingProperty(_get_description, doc="Database object description or None if object doesn't have a description.")
+    actions = LateBindingProperty(_get_actions, doc="List of supported SQL operations on metadata object instance.")
     #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitMetadatItem(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitMetadataItem(self)
     def issystemobject(self):
         "Returns True if this database object is system object."
-        return True if self._attributes.get('RDB$SYSTEM_FLAG',False) else False
+        return True if self._attributes.get('RDB$SYSTEM_FLAG', False) else False
     def get_quoted_name(self):
         "Returns quoted (if necessary) name."
-        if self._needs_quoting(self.name):
-            return '"%s"' % self.name
-        else:
-            return self.name
+        return self._get_quoted_ident(self.name)
     def get_dependents(self):
         "Returns list of all database objects that depend on this one."
         return [d for d in self.schema.dependencies
                 if d.depended_on_name == self.name and
-                d.depended_on_type in self._type_code]
+                   d.depended_on_type in self._type_code]
     def get_dependencies(self):
         "Returns list of database objects that this object depend on."
         return [d for d in self.schema.dependencies
                 if d.dependent_name == self.name and
-                d.dependent_type in self._type_code]
-    def get_sql_for(self,action,**params):
+                   d.dependent_type in self._type_code]
+    def get_sql_for(self, action, **params):
         """Returns SQL command for specified action on metadata object.
         Supported actions are defined by :attr:`actions` list.
-        :raises ProgrammingError: For unsupported action or wrong parameters passed.
+        :raises `~fdb.ProgrammingError`: For unsupported action or wrong parameters passed.
         """
         _action = action.lower()
         if _action in self._actions:
-            _call = getattr(self,'_get_%s_sql' % _action)
+            _call = getattr(self, '_get_%s_sql' % _action)
             return _call(**params)
         else:
             raise fdb.ProgrammingError("Unsupported action '%s'" % action)
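# Sketch of the generic get_sql_for() dispatch defined above: supported
# action names are published via the .actions property, and anything else
# raises fdb.ProgrammingError.  The exception name is illustrative.
exc = db_schema.get_exception('E_INVALID_INPUT')
if exc is not None:
    print(exc.actions)                 # e.g. ['comment', 'create', 'recreate', ...]
    print(exc.get_sql_for('recreate'))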
""" _action = action.lower() if _action in self._actions: - _call = getattr(self,'_get_%s_sql' % _action) + _call = getattr(self, '_get_%s_sql' % _action) return _call(**params) else: raise fdb.ProgrammingError("Unsupported action '%s'" % action) @@ -1246,11 +1450,11 @@ Supported SQL actions: - - User collation: create, drop - - System collation: none + - User collation: create, drop, comment + - System collation: comment """ - def __init__(self,schema,attributes): - super(Collation,self).__init__(schema,attributes) + def __init__(self, schema, attributes): + super(Collation, self).__init__(schema, attributes) self._type_code = [17,] self._strip_attribute('RDB$COLLATION_NAME') @@ -1259,16 +1463,16 @@ self._strip_attribute('RDB$SECURITY_CLASS') self._strip_attribute('RDB$OWNER_NAME') + self._actions = ['comment'] if not self.issystemobject(): - self._actions = ['create','drop'] + self._actions.extend(['create', 'drop']) #--- Protected - - def _get_drop_sql(self,**params): - self._check_params(params,[]) + def _get_drop_sql(self, **params): + self._check_params(params, []) return 'DROP COLLATION %s' % self.get_quoted_name() - def _get_create_sql(self,**params): - self._check_params(params,[]) + def _get_create_sql(self, **params): + self._check_params(params, []) base_sql = """CREATE COLLATION %s FOR %s %s @@ -1285,6 +1489,10 @@ 'ACCENT INSENSITIVE' if self.isaccentinsensitive() else 'ACCENT SENSITIVE', "'%s'" % self.specific_attributes if self.specific_attributes else '') return base_sql.strip() + def _get_comment_sql(self, **params): + return 'COMMENT ON COLLATION %s IS %s' % (self.get_quoted_name(), + 'NULL' if self.description is None else + "'%s'" % escape_single_quotes(self.description)) def _get_name(self): return self._attributes['RDB$COLLATION_NAME'] def _get_id(self): @@ -1308,31 +1516,16 @@ return self._attributes.get('RDB$OWNER_NAME') #--- Properties - - id = LateBindingProperty(_get_id,None,None,"Collation ID.") - character_set = LateBindingProperty(_get_character_set,None,None, - "Character set object associated with collation.") - base_collation = LateBindingProperty(_get_base_collation,None,None, - "Base Collation object that's extended by this one or None.") - attributes = LateBindingProperty(_get_attributes,None,None, - "Collation attributes.") - specific_attributes = LateBindingProperty(_get_specific_attributes,None,None, - "Collation specific attributes.") - function_name = LateBindingProperty(_get_function_name,None,None, - "Not currently used.") + id = LateBindingProperty(_get_id, doc="Collation ID.") + character_set = LateBindingProperty(_get_character_set, doc="Character set object associated with collation.") + base_collation = LateBindingProperty(_get_base_collation, doc="Base Collation object that's extended by this one or None.") + attributes = LateBindingProperty(_get_attributes, doc="Collation attributes.") + specific_attributes = LateBindingProperty(_get_specific_attributes, doc="Collation specific attributes.") + function_name = LateBindingProperty(_get_function_name, doc="Not currently used.") # FB 3.0 - security_class = LateBindingProperty(_get_security_class,None,None, - "Security class name or None.") - owner_name = LateBindingProperty(_get_owner_name,None,None,"Creator user name.") - + security_class = LateBindingProperty(_get_security_class, doc="Security class name or None.") + owner_name = LateBindingProperty(_get_owner_name, doc="Creator user name.") #--- Public - - def accept_visitor(self,visitor): - """Visitor Pattern support. 
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitCollation(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitCollation(self)
     def ispadded(self):
         """Returns True if collation has PAD SPACE attribute."""
         return bool(self.attributes & COLLATION_PAD_SPACE)
@@ -1344,15 +1537,15 @@
         return bool(self.attributes & COLLATION_ACCENT_INSENSITIVE)
     def isbasedonexternal(self):
         "Returns True if collation is based on external collation definition."
-        return (self._attributes['RDB$BASE_COLLATION_NAME'] and not self.base_collation)
+        return self._attributes['RDB$BASE_COLLATION_NAME'] and not self.base_collation
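# Sketch: the new 'comment' action is available even on system collations;
# a None description maps to COMMENT ON ... IS NULL and single quotes in the
# text are escaped.  The collation name is illustrative.
coll = db_schema.get_collation('ES_ES')
if coll is not None:
    print(coll.get_sql_for('comment'))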
character set.") # FB 3.0 - security_class = LateBindingProperty(_get_security_class,None,None, - "Security class name or None.") - owner_name = LateBindingProperty(_get_owner_name,None,None,"Creator user name.") - + security_class = LateBindingProperty(_get_security_class, doc="Security class name or None.") + owner_name = LateBindingProperty(_get_owner_name, doc="Creator user name.") #--- Public - - def accept_visitor(self,visitor): - """Visitor Pattern support. Calls `visitCharacterSet(self)` on parameter object. - - :param visitor: Visitor object of Vistior Pattern. - """ - visitor.visitCharacterSet(self) - def get_collation(self,name): + def get_collation(self, name): """Return :class:`Collation` object with specified name that belongs to this character set. """ - for col in self.collations: - if col.name == name: - return col - return None - def get_collation_by_id(self,id): + return self.collations.get(name) + def get_collation_by_id(self, id): """Return :class:`Collation` object with specified id that belongs to this character set. """ @@ -1433,37 +1613,42 @@ Supported SQL actions: - - User exception: create, recreate, alter(message=string), create_or_alter, drop - - System exception: none + - User exception: create, recreate, alter(message=string), create_or_alter, drop, comment + - System exception: comment """ - def __init__(self,schema,attributes): - super(DatabaseException,self).__init__(schema,attributes) + def __init__(self, schema, attributes): + super(DatabaseException, self).__init__(schema, attributes) self._type_code = [7,] self._strip_attribute('RDB$EXCEPTION_NAME') self._strip_attribute('RDB$SECURITY_CLASS') self._strip_attribute('RDB$OWNER_NAME') + self._actions = ['comment'] if not self.issystemobject(): - self._actions = ['create','recreate','alter','create_or_alter','drop'] + self._actions.extend(['create', 'recreate', 'alter', 'create_or_alter', 'drop']) - #--- Protected - def _get_create_sql(self,**params): - self._check_params(params,[]) + #--- Protected + def _get_create_sql(self, **params): + self._check_params(params, []) return "CREATE EXCEPTION %s '%s'" % (self.get_quoted_name(), escape_single_quotes(self.message)) - def _get_alter_sql(self,**params): - self._check_params(params,['message']) + def _get_alter_sql(self, **params): + self._check_params(params, ['message']) message = params.get('message') if message: return "ALTER EXCEPTION %s '%s'" % (self.get_quoted_name(), - escape_single_quotes(message)) + escape_single_quotes(message)) else: raise fdb.ProgrammingError("Missing required parameter: 'message'.") - def _get_drop_sql(self,**params): - self._check_params(params,[]) + def _get_drop_sql(self, **params): + self._check_params(params, []) return 'DROP EXCEPTION %s' % self.get_quoted_name() + def _get_comment_sql(self, **params): + return 'COMMENT ON EXCEPTION %s IS %s' % (self.get_quoted_name(), + 'NULL' if self.description is None + else "'%s'" % escape_single_quotes(self.description)) def _get_name(self): return self._attributes['RDB$EXCEPTION_NAME'] def _get_id(self): @@ -1476,64 +1661,61 @@ return self._attributes.get('RDB$OWNER_NAME') #--- Properties - - id = LateBindingProperty(_get_id,None,None, - "System-assigned unique exception number.") - message = LateBindingProperty(_get_message,None,None,"Custom message text.") + id = LateBindingProperty(_get_id, doc="System-assigned unique exception number.") + message = LateBindingProperty(_get_message, doc="Custom message text.") # FB 3.0 - security_class = 
@@ -1433,37 +1613,42 @@
     Supported SQL actions:

-    - User exception: create, recreate, alter(message=string), create_or_alter, drop
-    - System exception: none
+    - User exception: create, recreate, alter(message=string), create_or_alter, drop, comment
+    - System exception: comment
     """
-    def __init__(self,schema,attributes):
-        super(DatabaseException,self).__init__(schema,attributes)
+    def __init__(self, schema, attributes):
+        super(DatabaseException, self).__init__(schema, attributes)
         self._type_code = [7,]
         self._strip_attribute('RDB$EXCEPTION_NAME')
         self._strip_attribute('RDB$SECURITY_CLASS')
         self._strip_attribute('RDB$OWNER_NAME')
+        self._actions = ['comment']
         if not self.issystemobject():
-            self._actions = ['create','recreate','alter','create_or_alter','drop']
+            self._actions.extend(['create', 'recreate', 'alter', 'create_or_alter', 'drop'])
-    #--- Protected
-    def _get_create_sql(self,**params):
-        self._check_params(params,[])
+    #--- Protected
+    def _get_create_sql(self, **params):
+        self._check_params(params, [])
         return "CREATE EXCEPTION %s '%s'" % (self.get_quoted_name(),
                                              escape_single_quotes(self.message))
-    def _get_alter_sql(self,**params):
-        self._check_params(params,['message'])
+    def _get_alter_sql(self, **params):
+        self._check_params(params, ['message'])
         message = params.get('message')
         if message:
             return "ALTER EXCEPTION %s '%s'" % (self.get_quoted_name(),
-                                               escape_single_quotes(message))
+                                                escape_single_quotes(message))
         else:
             raise fdb.ProgrammingError("Missing required parameter: 'message'.")
-    def _get_drop_sql(self,**params):
-        self._check_params(params,[])
+    def _get_drop_sql(self, **params):
+        self._check_params(params, [])
         return 'DROP EXCEPTION %s' % self.get_quoted_name()
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON EXCEPTION %s IS %s' % (self.get_quoted_name(),
+                                                  'NULL' if self.description is None
+                                                  else "'%s'" % escape_single_quotes(self.description))
     def _get_name(self):
         return self._attributes['RDB$EXCEPTION_NAME']
     def _get_id(self):
@@ -1476,64 +1661,61 @@
         return self._attributes.get('RDB$OWNER_NAME')
     #--- Properties
-
-    id = LateBindingProperty(_get_id,None,None,
-        "System-assigned unique exception number.")
-    message = LateBindingProperty(_get_message,None,None,"Custom message text.")
+    id = LateBindingProperty(_get_id, doc="System-assigned unique exception number.")
+    message = LateBindingProperty(_get_message, doc="Custom message text.")
     # FB 3.0
-    security_class = LateBindingProperty(_get_security_class,None,None,
-        "Security class name or None.")
-    owner_name = LateBindingProperty(_get_owner_name,None,None,"Creator user name.")
-
-    #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitException(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitException(self)
+    security_class = LateBindingProperty(_get_security_class, doc="Security class name or None.")
+    owner_name = LateBindingProperty(_get_owner_name, doc="Creator user name.")
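# Sketch: DatabaseException 'alter' requires a message parameter and embeds
# it with single quotes escaped.  Exception name and text are illustrative.
exc = db_schema.get_exception('E_INVALID_INPUT')
if exc is not None:
    print(exc.get_sql_for('alter', message="Value can't be used"))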
 class Sequence(BaseSchemaItem):
     """Represents database generator/sequence.

     Supported SQL actions:

-    - User sequence: create, alter(value=number), drop
-    - System sequence: none
+    - User sequence: create, alter(value=number), drop, comment
+    - System sequence: comment
     """
-    def __init__(self,schema,attributes):
-        super(Sequence,self).__init__(schema,attributes)
+    def __init__(self, schema, attributes):
+        super(Sequence, self).__init__(schema, attributes)
         self._type_code = [14,]
         self._strip_attribute('RDB$GENERATOR_NAME')
         self._strip_attribute('RDB$SECURITY_CLASS')
         self._strip_attribute('RDB$OWNER_NAME')
+        self._actions = ['comment']
         if not self.issystemobject():
-            self._actions = ['create','alter','drop']
+            self._actions.extend(['create', 'alter', 'drop'])
     #--- protected
-
-    def _get_create_sql(self,**params):
-        self._check_params(params,[])
-        return 'CREATE SEQUENCE %s' % self.get_quoted_name()
-    def _get_alter_sql(self,**params):
-        self._check_params(params,['value'])
+    def _get_create_sql(self, **params):
+        self._check_params(params, [])
+        return 'CREATE %s %s' % (self.schema.opt_generator_keyword,
+                                 self.get_quoted_name())
+    def _get_alter_sql(self, **params):
+        self._check_params(params, ['value'])
         value = params.get('value')
         if value is not None:
-            return "ALTER SEQUENCE %s RESTART WITH %d" % (self.get_quoted_name(),value)
+            return "ALTER %s %s RESTART WITH %d" % (self.schema.opt_generator_keyword,
+                                                    self.get_quoted_name(),
+                                                    value)
         else:
             raise fdb.ProgrammingError("Missing required parameter: 'value'.")
-    def _get_drop_sql(self,**params):
-        self._check_params(params,[])
-        return 'DROP SEQUENCE %s' % self.get_quoted_name()
+    def _get_drop_sql(self, **params):
+        self._check_params(params, [])
+        return 'DROP %s %s' % (self.schema.opt_generator_keyword,
+                               self.get_quoted_name())
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON %s %s IS %s' % (self.schema.opt_generator_keyword,
+                                           self.get_quoted_name(),
+                                           'NULL' if self.description is None
+                                           else "'%s'" % escape_single_quotes(self.description))
     def _get_name(self):
         return self._attributes['RDB$GENERATOR_NAME']
     def _get_id(self):
         return self._attributes['RDB$GENERATOR_ID']
     def _get_value(self):
-        return self.schema._select_row("select GEN_ID(%s,0) from RDB$DATABASE" % self.name)['GEN_ID']
+        return self.schema._select_row("select GEN_ID(%s,0) from RDB$DATABASE" % self.get_quoted_name())['GEN_ID']
     def _get_security_class(self):
         return self._attributes.get('RDB$SECURITY_CLASS')
     def _get_owner_name(self):
@@ -1544,24 +1726,14 @@
         return self._attributes.get('RDB$GENERATOR_INCREMENT')
     #--- Properties
-
-    id = LateBindingProperty(_get_id,None,None,"Internal ID number of the sequence.")
-    value = LateBindingProperty(_get_value,None,None,"Current sequence value.")
+    id = LateBindingProperty(_get_id, doc="Internal ID number of the sequence.")
+    value = LateBindingProperty(_get_value, doc="Current sequence value.")
     # FB 3.0
-    security_class = LateBindingProperty(_get_security_class,None,None,
-        "Security class name or None.")
-    owner_name = LateBindingProperty(_get_owner_name,None,None,"Creator user name.")
-    inital_value = LateBindingProperty(_get_inital_value,None,None,"Initial sequence value.")
-    increment = LateBindingProperty(_get_increment,None,None,"Sequence increment.")
-
+    security_class = LateBindingProperty(_get_security_class, doc="Security class name or None.")
+    owner_name = LateBindingProperty(_get_owner_name, doc="Creator user name.")
+    inital_value = LateBindingProperty(_get_inital_value, doc="Initial sequence value.")
+    increment = LateBindingProperty(_get_increment, doc="Sequence increment.")
     #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitGenerator(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitGenerator(self)
     def isidentity(self):
         "Returns True for system generators created for IDENTITY columns."
         return self._attributes['RDB$SYSTEM_FLAG'] == 6
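# Sketch: Sequence DDL now goes through Schema.opt_generator_keyword, so the
# same call emits GENERATOR or SEQUENCE syntax depending on that option.
# The sequence name and value are illustrative.
seq = db_schema.get_sequence('CUST_NO_GEN')
if seq is not None:
    print(seq.get_sql_for('alter', value=1000))
    # e.g. ALTER SEQUENCE CUST_NO_GEN RESTART WITH 1000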
@@ -1572,12 +1744,12 @@
     Supported SQL actions:

     - User column: alter(name=string,datatype=string_SQLTypeDef,position=number,
-      expression=computed_by_expr,restart=None_or_init_value), drop
-    - System column: none
+      expression=computed_by_expr,restart=None_or_init_value), drop, comment
+    - System column: comment
     """
-    def __init__(self,schema,table,attributes):
-        super(TableColumn,self).__init__(schema,attributes)
-        self._type_code = [3,9]
+    def __init__(self, schema, table, attributes):
+        super(TableColumn, self).__init__(schema, attributes)
+        self._type_code = [3, 9]
         self.__table = weakref.proxy(table)
         self._strip_attribute('RDB$FIELD_NAME')
@@ -1586,13 +1758,13 @@
         self._strip_attribute('RDB$SECURITY_CLASS')
         self._strip_attribute('RDB$GENERATOR_NAME')
+        self._actions = ['comment']
         if not self.issystemobject():
-            self._actions = ['alter','drop']
+            self._actions.extend(['alter', 'drop'])
     #--- Protected
-
-    def _get_alter_sql(self,**params):
-        self._check_params(params,['expression','datatype','name','position','restart'])
+    def _get_alter_sql(self, **params):
+        self._check_params(params, ['expression', 'datatype', 'name', 'position', 'restart'])
         new_expr = params.get('expression')
         new_type = params.get('datatype')
         new_name = params.get('name')
@@ -1606,9 +1778,9 @@
         sql = 'ALTER TABLE %s ALTER COLUMN %s' % (self.table.get_quoted_name(),
                                                   self.get_quoted_name())
         if new_name:
-            return '%s TO %s' % (sql,self._get_quoted_ident(new_name))
+            return '%s TO %s' % (sql, self._get_quoted_ident(new_name))
         elif new_position:
-            return '%s POSITION %d' % (sql,new_position)
+            return '%s POSITION %d' % (sql, new_position)
         elif new_type or new_expr:
             result = sql
             if new_type:
@@ -1624,12 +1796,19 @@
             return sql
         else:
             raise fdb.ProgrammingError("Parameter required.")
-    def _get_drop_sql(self,**params):
-        self._check_params(params,[])
+    def _get_drop_sql(self, **params):
+        self._check_params(params, [])
         return 'ALTER TABLE %s DROP %s' % (self.table.get_quoted_name(),
                                            self.get_quoted_name())
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON COLUMN %s.%s IS %s' % (self.table.get_quoted_name(),
+                                                  self.get_quoted_name(),
+                                                  'NULL' if self.description is None
+                                                  else "'%s'" % escape_single_quotes(self.description))
     def _get_name(self):
         return self._attributes['RDB$FIELD_NAME']
+    def _get_id(self):
+        return self._attributes['RDB$FIELD_ID']
     def _get_table(self):
         return self.__table
     def _get_domain(self):
@@ -1650,45 +1829,28 @@
     def _get_datatype(self):
         return self.domain.datatype
     def _get_privileges(self):
-        return [p for p in self.schema.privileges
-                if (p.subject_name == self.table.name and
-                    p.field_name == self.name and
-                    p.subject_type in self.table._type_code)]
+        return self.schema.privileges.filter(lambda p: (p.subject_name == self.table.name and
+                                                        p.field_name == self.name and
+                                                        p.subject_type in self.table._type_code))
     def _get_generator(self):
         return self.schema.get_generator(self._attributes.get('RDB$GENERATOR_NAME'))
     def _get_identity_type(self):
         return self._attributes.get('RDB$IDENTITY_TYPE')
     #--- Properties
-
-    table = LateBindingProperty(_get_table,None,None,
-        "The Table object this column belongs to.")
-    domain = LateBindingProperty(_get_domain,None,None,
-        "Domain object this column is based on.")
-    position = LateBindingProperty(_get_position,None,None,
-        "Column's sequence number in row.")
-    security_class = LateBindingProperty(_get_security_class,None,None,
-        "Security class name or None.")
-    default = LateBindingProperty(_get_default,None,None,
-        "Default value for column or None.")
-    collation = LateBindingProperty(_get_collation,None,None,
-        "Collation object or None.")
-    datatype = LateBindingProperty(_get_datatype,None,None,
-        "Comlete SQL datatype definition.")
-    privileges = LateBindingProperty(_get_privileges,None,None,
-        "List of :class:`Privilege` objects granted to this object.")
+    id = LateBindingProperty(_get_id, doc="Internal number ID for the column.")
+    table = LateBindingProperty(_get_table, doc="The Table object this column belongs to.")
+    domain = LateBindingProperty(_get_domain, doc="Domain object this column is based on.")
+    position = LateBindingProperty(_get_position, doc="Column's sequence number in row.")
+    security_class = LateBindingProperty(_get_security_class, doc="Security class name or None.")
+    default = LateBindingProperty(_get_default, doc="Default value for column or None.")
+    collation = LateBindingProperty(_get_collation, doc="Collation object or None.")
+    datatype = LateBindingProperty(_get_datatype, doc="Complete SQL datatype definition.")
+    privileges = LateBindingProperty(_get_privileges, doc=":class:`~fdb.utils.ObjectList` of :class:`Privilege` objects granted to this object.")
     # FB 3.0
-    generator = LateBindingProperty(_get_generator,None,None,"Internal flags.")
-    identity_type = LateBindingProperty(_get_identity_type,None,None,"Internal flags.")
-
+    generator = LateBindingProperty(_get_generator, doc="Internal flags.")
+    identity_type = LateBindingProperty(_get_identity_type, doc="Internal flags.")
     #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitTableColumn(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitTableColumn(self)
     def get_dependents(self):
         "Return list of all database objects that depend on this one."
         return [d for d in self.schema.dependencies
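# Sketch: TableColumn 'alter' takes exactly one kind of change per call
# (name, position, datatype/expression or restart).  Names are illustrative.
tbl = db_schema.get_table('CUSTOMER')
col = tbl.get_column('CONTACT_FIRST') if tbl is not None else None
if col is not None:
    print(col.get_sql_for('alter', position=2))
    # -> ALTER TABLE CUSTOMER ALTER COLUMN CONTACT_FIRST POSITION 2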
@@ -1726,12 +1888,12 @@
     Supported SQL actions:

-    - User index: create, activate, deactivate, recompute, drop
-    - System index: recompute
+    - User index: create, activate, deactivate, recompute, drop, comment
+    - System index: activate, recompute, comment
     """
-    def __init__(self,schema,attributes):
-        super(Index,self).__init__(schema,attributes)
-        self._type_code = [6,10]
+    def __init__(self, schema, attributes):
+        super(Index, self).__init__(schema, attributes)
+        self._type_code = [6, 10]
         self.__segment_names = None
         self.__segment_statistics = None
@@ -1739,32 +1901,33 @@
         self._strip_attribute('RDB$RELATION_NAME')
         self._strip_attribute('RDB$FOREIGN_KEY')
-        if self.issystemobject():
-            self._actions = ['recompute']
-        else:
-            self._actions = ['create','activate','deactivate','recompute','drop']
+        self._actions = ['activate', 'recompute', 'comment']
+        if not self.issystemobject():
+            self._actions.extend(['create', 'deactivate', 'drop'])
     #--- Protected
-
-    def _get_create_sql(self,**params):
-        self._check_params(params,[])
-        return """CREATE %s%s INDEX %s
-   ON %s %s""" % ('UNIQUE ' if self.isunique() else '',
-                  self.index_type, self.get_quoted_name(),self.table.name,
-                  'COMPUTED BY %s' % self.expression if self.isexpression()
-                  else '(%s)' % ','.join(self.segment_names))
-    def _get_activate_sql(self,**params):
-        self._check_params(params,[])
+    def _get_create_sql(self, **params):
+        self._check_params(params, [])
+        return """CREATE %s%s INDEX %s ON %s %s""" % ('UNIQUE ' if self.isunique() else '',
+                                                      self.index_type, self.get_quoted_name(), self.table.name,
+                                                      'COMPUTED BY %s' % self.expression if self.isexpression()
+                                                      else '(%s)' % ','.join(self.segment_names))
+    def _get_activate_sql(self, **params):
+        self._check_params(params, [])
         return 'ALTER INDEX %s ACTIVE' % self.get_quoted_name()
-    def _get_deactivate_sql(self,**params):
-        self._check_params(params,[])
+    def _get_deactivate_sql(self, **params):
+        self._check_params(params, [])
         return 'ALTER INDEX %s INACTIVE' % self.get_quoted_name()
-    def _get_recompute_sql(self,**params):
-        self._check_params(params,[])
+    def _get_recompute_sql(self, **params):
+        self._check_params(params, [])
         return 'SET STATISTICS INDEX %s' % self.get_quoted_name()
-    def _get_drop_sql(self,**params):
-        self._check_params(params,[])
+    def _get_drop_sql(self, **params):
+        self._check_params(params, [])
        return 'DROP INDEX %s' % self.get_quoted_name()
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON INDEX %s IS %s' % (self.get_quoted_name(),
+                                              'NULL' if self.description is None
+                                              else "'%s'" % escape_single_quotes(self.description))
     def _get_name(self):
         return self._attributes['RDB$INDEX_NAME']
     def _get_table(self):
@@ -1782,13 +1945,13 @@
     def _get_statistics(self):
         return self._attributes['RDB$STATISTICS']
     def _get_segments(self):
-        return [self.table.get_column(colname) for colname in self.segment_names]
+        return ObjectList(self.table.get_column(colname) for colname in self.segment_names)
     def _get_segment_names(self):
         if self.__segment_names is None:
             if self._attributes['RDB$SEGMENT_COUNT'] > 0:
                 self.__segment_names = [r['RDB$FIELD_NAME'].strip() for r
                                         in self.schema._select("""select rdb$field_name
-from rdb$index_segments where rdb$index_name = ? order by rdb$field_position""",(self.name,))]
+from rdb$index_segments where rdb$index_name = ? order by rdb$field_position""", (self.name,))]
             else:
                 self.__segment_names = []
         return self.__segment_names
@@ -1797,8 +1960,8 @@
             if self._attributes['RDB$SEGMENT_COUNT'] > 0:
                 if self.schema._con.ods >= fdb.ODS_FB_21:
                     self.__segment_statistics = [r['RDB$STATISTICS'] for r
-                                                 in self.schema._select("""select RDB$STATISTICS
-from rdb$index_segments where rdb$index_name = ? order by rdb$field_position""",(self.name,))]
+                                                 in self.schema._select("""select RDB$STATISTICS
+from rdb$index_segments where rdb$index_name = ? order by rdb$field_position""", (self.name,))]
                 else:
                     self.__segment_statistics = [None for x in range(self._attributes['RDB$SEGMENT_COUNT'])]
             else:
@@ -1812,36 +1975,17 @@
         return None
     #--- Properties
-
-    table = LateBindingProperty(_get_table,None,None,
-        "The :class:`Table` instance the index applies to.")
-    id = LateBindingProperty(_get_id,None,None,
-        "Internal number ID of the index.")
-    index_type = LateBindingProperty(_get_index_type,None,None,
-        "ASCENDING or DESCENDING.")
-    partner_index = LateBindingProperty(_get_partner_index,None,None,
-        "Associated unique/primary key :class:`Index` instance, or None.")
-    expression = LateBindingProperty(_get_expression,None,None,
-        "Source of an expression or None.")
-    statistics = LateBindingProperty(_get_statistics,None,None,
-        "Latest selectivity of the index.")
-    segment_names = LateBindingProperty(_get_segment_names,None,None,
-        "List of index segment names.")
-    segment_statistics = LateBindingProperty(_get_segment_statistics,None,None,
-        "List of index segment statistics (for ODS 11.1 and higher).")
-    segments = LateBindingProperty(_get_segments,None,None,
-        "List of index segments as :class:`TableColumn` instances.")
-    constraint = LateBindingProperty(_get_constraint,None,None,
-        ":class:`Constraint` instance that uses this index or None.")
-
+    table = LateBindingProperty(_get_table, doc="The :class:`Table` instance the index applies to.")
+    id = LateBindingProperty(_get_id, doc="Internal number ID of the index.")
+    index_type = LateBindingProperty(_get_index_type, doc="ASCENDING or DESCENDING.")
+    partner_index = LateBindingProperty(_get_partner_index, doc="Associated unique/primary key :class:`Index` instance, or None.")
+    expression = LateBindingProperty(_get_expression, doc="Source of an expression or None.")
+    statistics = LateBindingProperty(_get_statistics, doc="Latest selectivity of the index.")
+    segment_names = LateBindingProperty(_get_segment_names, doc="List of index segment names.")
+    segment_statistics = LateBindingProperty(_get_segment_statistics, doc="List of index segment statistics (for ODS 11.1 and higher).")
+    segments = LateBindingProperty(_get_segments, doc=":class:`~fdb.utils.ObjectList` of index segments as :class:`TableColumn` instances.")
+    constraint = LateBindingProperty(_get_constraint, doc=":class:`Constraint` instance that uses this index or None.")
     #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitIndex(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitIndex(self)
     def issystemobject(self):
         "Returns True if this database object is system object."
         return bool(self._attributes['RDB$SYSTEM_FLAG']
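# Sketch: system indices now answer 'activate', 'recompute' and 'comment',
# while user indices additionally support 'create', 'deactivate' and 'drop'.
# The index name is illustrative.
idx = db_schema.get_index('CUSTNAMEX')
if idx is not None:
    print(idx.get_sql_for('recompute'))   # SET STATISTICS INDEX ...
    print(idx.get_sql_for('deactivate'))  # ALTER INDEX ... INACTIVE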
@@ -1862,11 +2006,11 @@
 class ViewColumn(BaseSchemaItem):
     """Represents view column.

-    Supported SQL actions: none
+    Supported SQL actions: comment
     """
-    def __init__(self,schema,view,attributes):
-        super(ViewColumn,self).__init__(schema,attributes)
-        self._type_code = [3,9]
+    def __init__(self, schema, view, attributes):
+        super(ViewColumn, self).__init__(schema, attributes)
+        self._type_code = [3, 9]
         self.__view = weakref.proxy(view)
         self._strip_attribute('RDB$FIELD_NAME')
@@ -1876,8 +2020,14 @@
         self._strip_attribute('RDB$SECURITY_CLASS')
         self._strip_attribute('BASE_RELATION')
-    #--- Protected
+        self._actions = ['comment']
+    #--- Protected
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON COLUMN %s.%s IS %s' % (self.view.get_quoted_name(),
+                                                  self.get_quoted_name(),
+                                                  'NULL' if self.description is None
+                                                  else "'%s'" % escape_single_quotes(self.description))
     def _get_name(self):
         return self._attributes['RDB$FIELD_NAME']
     def _get_base_field(self):
@@ -1909,40 +2059,22 @@
     def _get_datatype(self):
         return self.domain.datatype
     def _get_privileges(self):
-        return [p for p in self.schema.privileges
-                if (p.subject_name == self.view.name and
-                    p.field_name == self.name and
-                    p.subject_type == 0)] # Views are logged as Tables in RDB$USER_PRIVILEGES
+        return self.schema.privileges.filter(lambda p: (p.subject_name == self.view.name and
+                                                        p.field_name == self.name and
+                                                        p.subject_type == 0)) # Views are logged as Tables in RDB$USER_PRIVILEGES
     #--- Properties
-
-    base_field = LateBindingProperty(_get_base_field,None,None,
-        "The source column from the base relation. Result could be either "
-        ":class:`TableColumn`, :class:`ViewColumn` or :class:`ProcedureParameter` "
-        "instance or None.")
-    view = LateBindingProperty(_get_view,None,None,
-        "View object this column belongs to.")
-    domain = LateBindingProperty(_get_domain,None,None,
-        "Domain object this column is based on.")
-    position = LateBindingProperty(_get_position,None,None,
-        "Column's sequence number in row.")
-    security_class = LateBindingProperty(_get_security_class,None,None,
-        "Security class name or None.")
-    collation = LateBindingProperty(_get_collation,None,None,
-        "Collation object or None.")
-    datatype = LateBindingProperty(_get_datatype,None,None,
-        "Comlete SQL datatype definition.")
-    privileges = LateBindingProperty(_get_privileges,None,None,
-        "List of :class:`Privilege` objects granted to this object.")
-
+    base_field = LateBindingProperty(_get_base_field, doc="The source column from the base relation. Result could be either "
+                                                          ":class:`TableColumn`, :class:`ViewColumn` or :class:`ProcedureParameter` "
+                                                          "instance or None.")
+    view = LateBindingProperty(_get_view, doc="View object this column belongs to.")
+    domain = LateBindingProperty(_get_domain, doc="Domain object this column is based on.")
+    position = LateBindingProperty(_get_position, doc="Column's sequence number in row.")
+    security_class = LateBindingProperty(_get_security_class, doc="Security class name or None.")
+    collation = LateBindingProperty(_get_collation, doc="Collation object or None.")
+    datatype = LateBindingProperty(_get_datatype, doc="Complete SQL datatype definition.")
+    privileges = LateBindingProperty(_get_privileges, doc=":class:`~fdb.utils.ObjectList` of :class:`Privilege` objects granted to this object.")
     #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitViewColumn(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitViewColumn(self)
     def get_dependents(self):
         "Return list of all database objects that depend on this one."
         return [d for d in self.schema.dependencies
@@ -1966,56 +2098,64 @@
     Supported SQL actions:

     - User domain: create, alter(name=string,default=string_definition_or_None,
-      check=string_definition_or_None,datatype=string_SQLTypeDef), drop
-    - System domain: none
+      check=string_definition_or_None,datatype=string_SQLTypeDef), drop, comment
+    - System domain: comment
     """
-    def __init__(self,schema,attributes):
-        super(Domain,self).__init__(schema,attributes)
+    def __init__(self, schema, attributes):
+        super(Domain, self).__init__(schema, attributes)
         self._type_code = [9]
         self._strip_attribute('RDB$FIELD_NAME')
         self._strip_attribute('RDB$SECURITY_CLASS')
         self._strip_attribute('RDB$OWNER_NAME')
+        self._actions = ['comment']
         if not self.issystemobject():
-            self._actions = ['create','alter','drop']
+            self._actions.extend(['create', 'alter', 'drop'])
     #--- Protected
-
-    def _get_create_sql(self,**params):
-        self._check_params(params,[])
-        sql = 'CREATE DOMAIN %s AS %s' % (self.get_quoted_name(),self.datatype)
+    def _get_create_sql(self, **params):
+        self._check_params(params, [])
+        sql = 'CREATE DOMAIN %s AS %s' % (self.get_quoted_name(), self.datatype)
         if self.has_default():
             sql += ' DEFAULT %s' % self.default
+        if not self.isnullable():
+            sql += ' NOT NULL'
         if self.isvalidated():
             sql += ' ' + self.validation
         if self._attributes['RDB$COLLATION_ID']:
-            sql += 'COLLATE %s' % self._attributes['RDB$COLLATION_ID']
+            #sql += ' COLLATE %s' % self.collation.get_quoted_name()
+            if self.character_set._attributes['RDB$DEFAULT_COLLATE_NAME'] != self.collation.name:
+                sql += ' COLLATE %s' % self.collation.get_quoted_name()
         return sql
-    def _get_alter_sql(self,**params):
-        self._check_params(params,['name','default','check','datatype'])
+    def _get_alter_sql(self, **params):
+        self._check_params(params, ['name', 'default', 'check', 'datatype'])
         new_name = params.get('name')
-        new_default = params.get('default','')
-        new_constraint = params.get('check','')
+        new_default = params.get('default', '')
+        new_constraint = params.get('check', '')
         new_type = params.get('datatype')
         sql = 'ALTER DOMAIN %s' % self.get_quoted_name()
         if len(params) > 1:
             raise fdb.ProgrammingError("Only one parameter allowed.")
         if new_name:
-            return '%s TO %s' % (sql,self._get_quoted_ident(new_name))
+            return '%s TO %s' % (sql, self._get_quoted_ident(new_name))
         elif new_default != '':
-            return ('%s SET DEFAULT %s' % (sql,new_default) if new_default
+            return ('%s SET DEFAULT %s' % (sql, new_default) if new_default
                     else '%s DROP DEFAULT' % sql)
         elif new_constraint != '':
-            return ('%s ADD CHECK (%s)' % (sql,new_constraint) if new_constraint
+            return ('%s ADD CHECK (%s)' % (sql, new_constraint) if new_constraint
                     else '%s DROP CONSTRAINT' % sql)
         elif new_type:
-            return '%s TYPE %s' % (sql,new_type)
+            return '%s TYPE %s' % (sql, new_type)
         else:
             raise fdb.ProgrammingError("Parameter required.")
-    def _get_drop_sql(self,**params):
-        self._check_params(params,[])
+    def _get_drop_sql(self, **params):
+        self._check_params(params, [])
         return 'DROP DOMAIN %s' % self.get_quoted_name()
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON DOMAIN %s IS %s' % (self.get_quoted_name(),
+                                               'NULL' if self.description is None
+                                               else "'%s'" % escape_single_quotes(self.description))
     def _get_name(self):
         return self._attributes['RDB$FIELD_NAME']
     def _get_expression(self):
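# Sketch: Domain 'alter' parameters are mutually exclusive; passing more
# than one raises fdb.ProgrammingError.  The domain name is illustrative.
dom = db_schema.get_domain('D_BOOLEAN')
if dom is not None:
    print(dom.get_sql_for('alter', check='VALUE IN (0, 1)'))
    # -> ALTER DOMAIN D_BOOLEAN ADD CHECK (VALUE IN (0, 1))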
FBT_BIGINT): if self.precision != None: - if (self.sub_type > 0) and (self.sub_type < MAX_INTSUBTYPES): + if (self.sub_type > 0) and (self.sub_type <= MAX_INTSUBTYPES): l.append('%s(%d, %d)' % \ - (INTEGRAL_SUBTYPES[self.sub_type],self.precision,-self.scale)) + (INTEGRAL_SUBTYPES[self.sub_type], self.precision, -self.scale)) precision_known = True if not precision_known: if (self.field_type == FBT_SMALLINT) and (self.scale < 0): @@ -2076,28 +2216,24 @@ l.append('NUMERIC(15, %d)' % -self.scale) else: l.append(COLUMN_TYPES[self.field_type]) - if self.field_type in (FBT_CHAR,FBT_VARCHAR): - l.append('(%d)' % (self.length if self.character_length == None else self.character_length)) + if self.field_type in (FBT_CHAR, FBT_VARCHAR): + l.append('(%d)' % (self.length if self.character_length is None else self.character_length)) if self._attributes['RDB$DIMENSIONS'] != None: l.append('[%s]' % ', '.join('%d' % u if l == 1 - else '%d:%d' % (l,u) - for l,u in self.dimensions)) + else '%d:%d' % (l, u) + for l, u in self.dimensions)) if self.field_type == FBT_BLOB: if self.sub_type >= 0 and self.sub_type <= MAX_BLOBSUBTYPES: l.append(' SUB_TYPE %s' % BLOB_SUBTYPES[self.sub_type]) else: l.append(' SUB_TYPE %d' % self.sub_type) l.append(' SEGMENT SIZE %d' % self.segment_length) - if self.field_type in (FBT_CHAR,FBT_VARCHAR,FBT_BLOB): + if self.field_type in (FBT_CHAR, FBT_VARCHAR, FBT_BLOB): if self._attributes['RDB$CHARACTER_SET_ID'] is not None and \ (self.character_set.name != self.schema.default_character_set.name) or \ self._attributes['RDB$COLLATION_ID']: - if (self._attributes['RDB$CHARACTER_SET_ID'] is not None): + if self._attributes['RDB$CHARACTER_SET_ID'] is not None: l.append(' CHARACTER SET %s' % self.character_set.name) - if self._attributes['RDB$COLLATION_ID'] is not None: - cname = self.collation.name - if self.character_set._attributes['RDB$DEFAULT_COLLATE_NAME'] != cname: - l.append(' COLLATE %s' % cname) return ''.join(l) def _get_security_class(self): return self._attributes.get('RDB$SECURITY_CLASS') @@ -2105,53 +2241,27 @@ return self._attributes.get('RDB$OWNER_NAME') #--- Properties - - expression = LateBindingProperty(_get_expression,None,None, - "Expression that defines the COMPUTED BY column or None.") - validation = LateBindingProperty(_get_validation,None,None, - "CHECK constraint for the domain or None.") - default = LateBindingProperty(_get_default,None,None, - "Expression that defines the default value or None.") - length = LateBindingProperty(_get_length,None,None, - "Length of the column in bytes.") - scale = LateBindingProperty(_get_scale,None,None, - "Negative number representing the scale of NUMBER and DECIMAL column.") - field_type = LateBindingProperty(_get_field_type,None,None, - "Number code of the data type defined for the column.") - sub_type = LateBindingProperty(_get_sub_type,None,None,"BLOB subtype.") - segment_length = LateBindingProperty(_get_segment_length,None,None, - "For BLOB columns, a suggested length for BLOB buffers.") - external_length = LateBindingProperty(_get_external_length,None,None, - "Length of field as it is in an external table. 
Always 0 for regular tables.") - external_scale = LateBindingProperty(_get_external_scale,None,None, - "Scale factor of an integer field as it is in an external table.") - external_type = LateBindingProperty(_get_external_type,None,None, - "Data type of the field as it is in an external table.") - dimensions = LateBindingProperty(_get_dimensions,None,None, - "List of dimension definition pairs if column is an array type. Always empty for non-array columns.") - character_length = LateBindingProperty(_get_character_length,None,None, - "Length of CHAR and VARCHAR column, in characters (not bytes).") - collation = LateBindingProperty(_get_collation,None,None, - "Collation object for a character column or None.") - character_set = LateBindingProperty(_get_character_set,None,None, - "CharacterSet object for a character or text BLOB column, or None.") - precision = LateBindingProperty(_get_precision,None,None, - "Indicates the number of digits of precision available to the data type of the column.") - datatype = LateBindingProperty(_get_datatype,None,None, - "Comlete SQL datatype definition.") + expression = LateBindingProperty(_get_expression, doc="Expression that defines the COMPUTED BY column or None.") + validation = LateBindingProperty(_get_validation, doc="CHECK constraint for the domain or None.") + default = LateBindingProperty(_get_default, doc="Expression that defines the default value or None.") + length = LateBindingProperty(_get_length, doc="Length of the column in bytes.") + scale = LateBindingProperty(_get_scale, doc="Negative number representing the scale of NUMBER and DECIMAL column.") + field_type = LateBindingProperty(_get_field_type, doc="Number code of the data type defined for the column.") + sub_type = LateBindingProperty(_get_sub_type, doc="BLOB subtype.") + segment_length = LateBindingProperty(_get_segment_length, doc="For BLOB columns, a suggested length for BLOB buffers.") + external_length = LateBindingProperty(_get_external_length, doc="Length of field as it is in an external table. Always 0 for regular tables.") + external_scale = LateBindingProperty(_get_external_scale, doc="Scale factor of an integer field as it is in an external table.") + external_type = LateBindingProperty(_get_external_type, doc="Data type of the field as it is in an external table.") + dimensions = LateBindingProperty(_get_dimensions, doc="List of dimension definition pairs if column is an array type. Always empty for non-array columns.") + character_length = LateBindingProperty(_get_character_length, doc="Length of CHAR and VARCHAR column, in characters (not bytes).") + collation = LateBindingProperty(_get_collation, doc="Collation object for a character column or None.") + character_set = LateBindingProperty(_get_character_set, doc="CharacterSet object for a character or text BLOB column, or None.") + precision = LateBindingProperty(_get_precision, doc="Indicates the number of digits of precision available to the data type of the column.") + datatype = LateBindingProperty(_get_datatype, doc="Comlete SQL datatype definition.") # FB 3.0 - security_class = LateBindingProperty(_get_security_class,None,None, - "Security class name or None.") - owner_name = LateBindingProperty(_get_owner_name,None,None,"Creator user name.") - + security_class = LateBindingProperty(_get_security_class, doc="Security class name or None.") + owner_name = LateBindingProperty(_get_owner_name, doc="Creator user name.") #--- Public - - def accept_visitor(self,visitor): - """Visitor Pattern support. 
Calls `visitDomain(self)` on parameter object. - - :param visitor: Visitor object of Vistior Pattern. - """ - visitor.visitDomain(self) def issystemobject(self): "Return True if this database object is system object." return (self._attributes['RDB$SYSTEM_FLAG'] == 1) or self.name.startswith('RDB$') @@ -2176,8 +2286,8 @@ Supported SQL actions: none """ - def __init__(self,schema,attributes): - super(Dependency,self).__init__(schema,attributes) + def __init__(self, schema, attributes): + super(Dependency, self).__init__(schema, attributes) self._strip_attribute('RDB$DEPENDENT_NAME') self._strip_attribute('RDB$DEPENDED_ON_NAME') @@ -2185,7 +2295,6 @@ self._strip_attribute('RDB$PACKAGE_NAME') #--- Protected - def _get_dependent_name(self): return self._attributes['RDB$DEPENDENT_NAME'] def _get_dependent_type(self): @@ -2238,7 +2347,7 @@ return None elif self.dependent_type == 17: # Collation return self.schema.get_collation(self.dependent_name) - elif self.dependent_type in [18,19]: # Package + package body + elif self.dependent_type in [18, 19]: # Package + package body return self.schema.get_package(self.dependent_name) return None def _get_depended_on(self): @@ -2249,7 +2358,11 @@ else: return t elif self.depended_on_type == 1: # VIEW - return self.schema.get_view(self.depended_on_name) + t = self.schema.get_view(self.depended_on_name) + if self.field_name: + return t.get_column(self.field_name) + else: + return t elif self.depended_on_type == 2: # TRIGGER return self.schema.get_trigger(self.depended_on_name) elif self.depended_on_type == 3: # COMPUTED FIELD (i.e. DOMAIN) @@ -2290,33 +2403,16 @@ return self.schema.get_package(self._attributes.get('RDB$PACKAGE_NAME')) #--- Properties - - dependent = LateBindingProperty(_get_dependent,None,None, - "Dependent database object.") - dependent_name = LateBindingProperty(_get_dependent_name,None,None, - "Dependent database object name.") - dependent_type = LateBindingProperty(_get_dependent_type,None,None, - "Dependent database object type.") - field_name = LateBindingProperty(_get_field_name,None,None, - "Name of one column in `depended on` object.") - depended_on = LateBindingProperty(_get_depended_on,None,None, - "Database object on which dependent depends.") - depended_on_name = LateBindingProperty(_get_depended_on_name,None,None, - "Name of db object on which dependent depends.") - depended_on_type = LateBindingProperty(_get_depended_on_type,None,None, - "Type of db object on which dependent depends.") + dependent = LateBindingProperty(_get_dependent, doc="Dependent database object.") + dependent_name = LateBindingProperty(_get_dependent_name, doc="Dependent database object name.") + dependent_type = LateBindingProperty(_get_dependent_type, doc="Dependent database object type.") + field_name = LateBindingProperty(_get_field_name, doc="Name of one column in `depended on` object.") + depended_on = LateBindingProperty(_get_depended_on, doc="Database object on which dependent depends.") + depended_on_name = LateBindingProperty(_get_depended_on_name, doc="Name of db object on which dependent depends.") + depended_on_type = LateBindingProperty(_get_depended_on_type, doc="Type of db object on which dependent depends.") # FB 3.0 - package = LateBindingProperty(_get_package,None,None, - ":class:`Package` instance if dependent depends on object in package or None.") - + package = LateBindingProperty(_get_package, doc=":class:`Package` instance if dependent depends on object in package or None.") #--- Public - - def accept_visitor(self,visitor): - 
"""Visitor Pattern support. Calls `visitDependency(self)` on parameter object. - - :param visitor: Visitor object of Vistior Pattern. - """ - visitor.visitDependency(self) def issystemobject(self): "Returns True as dependency entries are considered as system objects." return True @@ -2338,8 +2434,8 @@ - Constraint on user table except NOT NULL constraint: create, drop - Constraint on system table: none """ - def __init__(self,schema,attributes): - super(Constraint,self).__init__(schema,attributes) + def __init__(self, schema, attributes): + super(Constraint, self).__init__(schema, attributes) self._strip_attribute('RDB$CONSTRAINT_NAME') self._strip_attribute('RDB$CONSTRAINT_TYPE') @@ -2354,12 +2450,11 @@ self._strip_attribute('RDB$DELETE_RULE') if not (self.issystemobject() or self.isnotnull()): - self._actions = ['create','drop'] + self._actions = ['create', 'drop'] #--- Protected - - def _get_create_sql(self,**params): - self._check_params(params,[]) + def _get_create_sql(self, **params): + self._check_params(params, []) const_def = 'ALTER TABLE %s ADD ' % self.table.get_quoted_name() if not self.name.startswith('INTEG_'): const_def += 'CONSTRAINT %s\n ' % self.get_quoted_name() @@ -2368,9 +2463,9 @@ elif self.ispkey() or self.isunique(): const_def += 'PRIMARY KEY' if self.ispkey() else 'UNIQUE' i = self.index - const_def += ' (%s)' % ','.join(i.segment_names) + const_def += ' (%s)' % ','.join(i.segment_names) if not i.issystemobject(): - const_def += '\n USING %s INDEX %s' % (i.index_type,i.get_quoted_name()) + const_def += '\n USING %s INDEX %s' % (i.index_type, i.get_quoted_name()) elif self.isfkey(): const_def += 'FOREIGN KEY (%s)\n ' % ','.join(self.index.segment_names) p = self.partner_constraint @@ -2382,12 +2477,12 @@ const_def += '\n ON UPDATE %s' % self.update_rule i = self.index if not i.issystemobject(): - const_def += '\n USING %s INDEX %s' % (i.index_type,i.get_quoted_name()) + const_def += '\n USING %s INDEX %s' % (i.index_type, i.get_quoted_name()) else: raise fdb.OperationalError("Unrecognized constraint type '%s'" % self.constraint_type) return const_def - def _get_drop_sql(self,**params): - self._check_params(params,[]) + def _get_drop_sql(self, **params): + self._check_params(params, []) return 'ALTER TABLE %s DROP CONSTRAINT %s' % (self.table.get_quoted_name(), self.get_quoted_name()) def _get_name(self): @@ -2420,36 +2515,17 @@ return self._attributes['RDB$DELETE_RULE'] #--- Properties - - constraint_type = LateBindingProperty(_get_constraint_type,None,None, - "primary key/unique/foreign key/check/not null.") - table = LateBindingProperty(_get_table,None,None, - ":class:`Table` instance this constraint applies to.") - index = LateBindingProperty(_get_index,None,None, - ":class:`Index` instance that enforces the constraint.\n`None` if constraint is not primary key/unique or foreign key.") - trigger_names = LateBindingProperty(_get_trigger_names,None,None, - "For a CHECK constraint contains trigger names that enforce the constraint.") - triggers = LateBindingProperty(_get_triggers,None,None, - "For a CHECK constraint contains :class:`Trigger` instances that enforce the constraint.") - column_name = LateBindingProperty(_get_column_name,None,None, - "For a NOT NULL constraint, this is the name of the column to which the constraint applies.") - partner_constraint = LateBindingProperty(_get_partner_constraint,None,None, - "For a FOREIGN KEY constraint, this is the unique or primary key :class:`Constraint` referred.") - match_option = 
LateBindingProperty(_get_match_option,None,None, - "For a FOREIGN KEY constraint only. Current value is FULL in all cases.") - update_rule = LateBindingProperty(_get_update_rule,None,None, - "For a FOREIGN KEY constraint, this is the action applicable to when primary key is updated.") - delete_rule = LateBindingProperty(_get_delete_rule,None,None, - "For a FOREIGN KEY constraint, this is the action applicable to when primary key is deleted.") - + constraint_type = LateBindingProperty(_get_constraint_type, doc="primary key/unique/foreign key/check/not null.") + table = LateBindingProperty(_get_table, doc=":class:`Table` instance this constraint applies to.") + index = LateBindingProperty(_get_index, doc=":class:`Index` instance that enforces the constraint.\n`None` if constraint is not primary key/unique or foreign key.") + trigger_names = LateBindingProperty(_get_trigger_names, doc="For a CHECK constraint contains trigger names that enforce the constraint.") + triggers = LateBindingProperty(_get_triggers, doc="For a CHECK constraint contains :class:`Trigger` instances that enforce the constraint.") + column_name = LateBindingProperty(_get_column_name, doc="For a NOT NULL constraint, this is the name of the column to which the constraint applies.") + partner_constraint = LateBindingProperty(_get_partner_constraint, doc="For a FOREIGN KEY constraint, this is the unique or primary key :class:`Constraint` referred.") + match_option = LateBindingProperty(_get_match_option, doc="For a FOREIGN KEY constraint only. Current value is FULL in all cases.") + update_rule = LateBindingProperty(_get_update_rule, doc="For a FOREIGN KEY constraint, this is the action applicable to when primary key is updated.") + delete_rule = LateBindingProperty(_get_delete_rule, doc="For a FOREIGN KEY constraint, this is the action applicable to when primary key is deleted.") #--- Public - - def accept_visitor(self,visitor): - """Visitor Pattern support. Calls `visitConstraint(self)` on parameter object. - - :param visitor: Visitor object of Vistior Pattern. - """ - visitor.visitConstraint(self) def issystemobject(self): "Returns True if this database object is system object." 
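[Editor's note: a sketch of the DDL reconstructed by Constraint._get_create_sql()
above, reusing the illustrative connection from the earlier note; because
segment_names are joined with a bare comma, the column list carries no spaces:

    pk = con.schema.get_table('JOB').primary_key
    # Expected shape (employee example database):
    # ALTER TABLE JOB ADD PRIMARY KEY (JOB_CODE,JOB_GRADE,JOB_COUNTRY)
    print(pk.get_sql_for('create'))
]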
     def issystemobject(self):
         "Returns True if this database object is system object."
         return self.schema.get_table(self._attributes['RDB$RELATION_NAME']).issystemobject()
@@ -2480,11 +2556,13 @@
     Supported SQL actions:
-    - User table: create, recreate, drop
-    - System table: none
+    - User table: create (no_pk=bool,no_unique=bool),
+      recreate (no_pk=bool,no_unique=bool),
+      drop, comment
+    - System table: comment
     """
-    def __init__(self,schema,attributes):
-        super(Table,self).__init__(schema,attributes)
+    def __init__(self, schema, attributes):
+        super(Table, self).__init__(schema, attributes)
         self._type_code = [0,]
         self.__columns = None
@@ -2494,73 +2572,86 @@
         self._strip_attribute('RDB$SECURITY_CLASS')
         self._strip_attribute('RDB$DEFAULT_CLASS')
+        self._actions = ['comment']
         if not self.issystemobject():
-            self._actions = ['create','recreate','drop']
+            self._actions.extend(['create', 'recreate', 'drop'])
     #--- Protected
-
-    def _get_create_sql(self,**params):
-        self._check_params(params,[])
-        tabdef = 'CREATE %sTABLE %s' % ('GLOBAL TEMPORARY ' if self.isgtt() else '',
-                                        self.get_quoted_name())
-        if self.isexternal():
-            tabdef += " EXTERNAL FILE '%s'\n" % self.external_file
-        tabdef += '\n('
-        partdefs = []
-        for col in self.columns:
-            coldef = '\n %s ' % col.get_quoted_name()
-            collate = ''
-            if col.isdomainbased():
-                coldef += '%s' % col.domain.get_quoted_name()
-            elif col.iscomputed():
-                coldef += 'COMPUTED BY %s' % col.get_computedby()
-            else:
-                datatype = col.datatype
-                if datatype.rfind(' COLLATE ') > 0:
-                    datatype, collate = datatype.split(' COLLATE ')
-                coldef += '%s' % datatype
-            if col.isidentity():
-                coldef += ' GENERATED BY DEFAULT AS IDENTITY'
-                if col.generator.inital_value != 0:
-                    coldef += ' (START WITH %d)' % col.generator.inital_value
-            else:
-                if col.has_default():
-                    coldef += ' DEFAULT %s' % col.default
-                if not col.isnullable():
-                    coldef += ' NOT NULL'
-                if col._attributes['RDB$COLLATION_ID'] is not None:
-                    cname = col.collation.name
-                    if col.domain.character_set._attributes['RDB$DEFAULT_COLLATE_NAME'] != cname:
-                        collate = cname
+    def _get_create_sql(self, **params):
+        try:
+            self._check_params(params, ['no_pk', 'no_unique'])
+            no_pk = params.get('no_pk', False)
+            no_unique = params.get('no_unique', False)
+            #
+            tabdef = 'CREATE %sTABLE %s' % ('GLOBAL TEMPORARY ' if self.isgtt() else '',
+                                            self.get_quoted_name())
+            if self.isexternal():
+                tabdef += " EXTERNAL FILE '%s'\n" % self.external_file
+            tabdef += ' ('
+            partdefs = []
+            for col in self.columns:
+                coldef = '\n %s ' % col.get_quoted_name()
+                collate = ''
+                if col.isdomainbased():
+                    coldef += '%s' % col.domain.get_quoted_name()
+                elif col.iscomputed():
+                    coldef += 'COMPUTED BY %s' % col.get_computedby()
+                else:
+                    datatype = col.datatype
+                    if datatype.rfind(' COLLATE ') > 0:
+                        datatype, collate = datatype.split(' COLLATE ')
+                    coldef += '%s' % datatype
+                if col.isidentity():
+                    coldef += ' GENERATED BY DEFAULT AS IDENTITY'
+                    if col.generator.inital_value != 0:
+                        coldef += ' (START WITH %d)' % col.generator.inital_value
+                else:
+                    if col.has_default():
+                        coldef += ' DEFAULT %s' % col.default
+                    if not col.isnullable():
+                        coldef += ' NOT NULL'
+                    if col._attributes['RDB$COLLATION_ID'] is not None:
+                        # Sometimes RDB$COLLATION_ID has a garbage value
+                        if col.collation is not None:
+                            cname = col.collation.name
+                            if col.domain.character_set._attributes['RDB$DEFAULT_COLLATE_NAME'] != cname:
+                                collate = cname
                 if collate:
                     coldef += ' COLLATE %s' % collate
-            partdefs.append(coldef)
-        if self.has_pkey():
-            pk = self.primary_key
-            pkdef = '\n '
-            if not pk.name.startswith('INTEG_'):
-                pkdef += 'CONSTRAINT %s\n ' % pk.get_quoted_name()
-            i = pk.index
-            pkdef += 'PRIMARY KEY (%s)' % ','.join(i.segment_names)
-            if not i.issystemobject():
-                pkdef += '\n USING %s INDEX %s' % (i.index_type,i.get_quoted_name())
-            partdefs.append(pkdef)
-        for uq in self.constraints:
-            if uq.isunique():
-                uqdef = '\n '
-                if not uq.name.startswith('INTEG_'):
-                    uqdef += 'CONSTRAINT %s\n ' % uq.get_quoted_name()
-                i = uq.index
-                uqdef += 'UNIQUE (%s)' % ','.join(i.segment_names)
+                partdefs.append(coldef)
+            if self.has_pkey() and not no_pk:
+                pk = self.primary_key
+                pkdef = '\n '
+                if not pk.name.startswith('INTEG_'):
+                    pkdef += 'CONSTRAINT %s\n ' % pk.get_quoted_name()
+                i = pk.index
+                pkdef += 'PRIMARY KEY (%s)' % ','.join(i.segment_names)
                 if not i.issystemobject():
-                    uqdef += '\n USING %s INDEX %s' % (i.index_type,i.get_quoted_name())
-                partdefs.append(uqdef)
-        tabdef += ','.join(partdefs)
-        tabdef += '\n)'
-        return tabdef
-    def _get_drop_sql(self,**params):
-        self._check_params(params,[])
+                    pkdef += '\n USING %s INDEX %s' % (i.index_type, i.get_quoted_name())
+                partdefs.append(pkdef)
+            if not no_unique:
+                for uq in self.constraints:
+                    if uq.isunique():
+                        uqdef = '\n '
+                        if not uq.name.startswith('INTEG_'):
+                            uqdef += 'CONSTRAINT %s\n ' % uq.get_quoted_name()
+                        i = uq.index
+                        uqdef += 'UNIQUE (%s)' % ','.join(i.segment_names)
+                        if not i.issystemobject():
+                            uqdef += '\n USING %s INDEX %s' % (i.index_type, i.get_quoted_name())
+                        partdefs.append(uqdef)
+            tabdef += ','.join(partdefs)
+            tabdef += '\n)'
+            return tabdef
+        except Exception as e:
+            raise e
+    def _get_drop_sql(self, **params):
+        self._check_params(params, [])
         return 'DROP TABLE %s' % self.get_quoted_name()
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON TABLE %s IS %s' % (self.get_quoted_name(),
                                              'NULL' if self.description is None
                                              else "'%s'" % escape_single_quotes(self.description))
     def _get_name(self):
         return self._attributes['RDB$RELATION_NAME']
     def _get_id(self):
@@ -2583,25 +2674,23 @@
     def _get_flags(self):
         return self._attributes['RDB$FLAGS']
     def _get_indices(self):
-        return [i for i in self.schema._get_all_indices()
-                if i._attributes['RDB$RELATION_NAME'] == self.name]
+        return self.schema._get_all_indices().filter(lambda i: i._attributes['RDB$RELATION_NAME'] == self.name)
     def _get_triggers(self):
-        return [t for t in self.schema.triggers
-                if t._attributes['RDB$RELATION_NAME'] == self.name]
+        return self.schema.triggers.filter(lambda t: t._attributes['RDB$RELATION_NAME'] == self.name)
     def _get_constraints(self):
-        return [c for c in self.schema.constraints
-                if c._attributes['RDB$RELATION_NAME'] == self.name]
+        return self.schema.constraints.filter(lambda c: c._attributes['RDB$RELATION_NAME'] == self.name)
     def _get_columns(self):
         if self.__columns is None:
-            cols = ['RDB$FIELD_NAME','RDB$RELATION_NAME','RDB$FIELD_SOURCE',
-                    'RDB$FIELD_POSITION','RDB$UPDATE_FLAG','RDB$FIELD_ID',
-                    'RDB$DESCRIPTION','RDB$SECURITY_CLASS','RDB$SYSTEM_FLAG',
-                    'RDB$NULL_FLAG','RDB$DEFAULT_SOURCE','RDB$COLLATION_ID']
+            cols = ['RDB$FIELD_NAME', 'RDB$RELATION_NAME', 'RDB$FIELD_SOURCE',
+                    'RDB$FIELD_POSITION', 'RDB$UPDATE_FLAG', 'RDB$FIELD_ID',
+                    'RDB$DESCRIPTION', 'RDB$SECURITY_CLASS', 'RDB$SYSTEM_FLAG',
+                    'RDB$NULL_FLAG', 'RDB$DEFAULT_SOURCE', 'RDB$COLLATION_ID']
             if self.schema._con.ods >= fdb.ODS_FB_30:
                 cols.extend(['RDB$GENERATOR_NAME', 'RDB$IDENTITY_TYPE'])
-            self.__columns = [TableColumn(self.schema,self,row) for row in
-                              self.schema._select("""select %s from RDB$RELATION_FIELDS
-where RDB$RELATION_NAME = ? order by RDB$FIELD_POSITION""" % ','.join(cols),(self.name,))]
+            self.__columns = ObjectList((TableColumn(self.schema, self, row) for row in
                                          self.schema._select("""select %s from RDB$RELATION_FIELDS
+where RDB$RELATION_NAME = ? order by RDB$FIELD_POSITION""" % ','.join(cols), (self.name,))), TableColumn, 'item.name')
+            self.__columns.freeze()
         return self.__columns
     def _get_primary_key(self):
         for const in self.constraints:
@@ -2609,65 +2698,42 @@
                 return const
         return None
     def _get_foreign_keys(self):
-        return [c for c in self.constraints if c.isfkey()]
+        return self.constraints.filter(lambda c: c.isfkey())
     def _get_privileges(self):
-        return [p for p in self.schema.privileges
-                if ((p.subject_name == self.name) and
-                    (p.subject_type in self._type_code))]
+        return self.schema.privileges.filter(lambda p: ((p.subject_name == self.name) and
                                                        (p.subject_type in self._type_code)))
     #--- Properties
-
-    id = LateBindingProperty(_get_id,None,None,"Internam number ID for the table.")
-    dbkey_length = LateBindingProperty(_get_dbkey_length,None,None,
-        "Length of the RDB$DB_KEY column in bytes.")
-    format = LateBindingProperty(_get_format,None,None,
-        "Internal format ID for the table.")
-    table_type = LateBindingProperty(_get_table_type,None,None,"Table type.")
-    security_class = LateBindingProperty(_get_security_class,None,None,
-        "Security class that define access limits to the table.")
-    external_file = LateBindingProperty(_get_external_file,None,None,
-        "Full path to the external data file, if any.")
-    owner_name = LateBindingProperty(_get_owner_name,None,None,
-        "User name of table's creator.")
-    default_class = LateBindingProperty(_get_default_class,None,None,
-        "Default security class.")
-    flags = LateBindingProperty(_get_flags,None,None,"Internal flags.")
-    primary_key = LateBindingProperty(_get_primary_key,None,None,
-        "PRIMARY KEY :class:`Constraint` for this table or None.")
-    foreign_keys = LateBindingProperty(_get_foreign_keys,None,None,
-        "List of FOREIGN KEY :class:`Constraint` instances for this table.")
-    columns = LateBindingProperty(_get_columns,None,None,
-        "Returns list of columns defined for table.\nItems are :class:`TableColumn` objects.")
-    constraints = LateBindingProperty(_get_constraints,None,None,
-        "Returns list of constraints defined for table.\nItems are :class:`Constraint` objects.")
-    indices = LateBindingProperty(_get_indices,None,None,
-        "Returns list of indices defined for table.\nItems are :class:`Index` objects.")
-    triggers = LateBindingProperty(_get_triggers,None,None,
-        "Returns list of triggers defined for table.\nItems are :class:`Trigger` objects.")
-    privileges = LateBindingProperty(_get_privileges,None,None,
-        "List of :class:`Privilege` objects granted to this object.")
-    # FB 3.0
-
+    id = LateBindingProperty(_get_id, doc="Internal number ID for the table.")
+    dbkey_length = LateBindingProperty(_get_dbkey_length, doc="Length of the RDB$DB_KEY column in bytes.")
+    format = LateBindingProperty(_get_format, doc="Internal format ID for the table.")
+    table_type = LateBindingProperty(_get_table_type, doc="Table type.")
+    security_class = LateBindingProperty(_get_security_class, doc="Security class that defines access limits to the table.")
+    external_file = LateBindingProperty(_get_external_file, doc="Full path to the external data file, if any.")
+    owner_name = LateBindingProperty(_get_owner_name, doc="User name of table's creator.")
+    default_class = LateBindingProperty(_get_default_class, doc="Default security class.")
+    flags = LateBindingProperty(_get_flags, doc="Internal flags.")
+    primary_key = LateBindingProperty(_get_primary_key, doc="PRIMARY KEY :class:`Constraint` for this table or None.")
+    foreign_keys = LateBindingProperty(_get_foreign_keys, doc=":class:`~fdb.utils.ObjectList` of FOREIGN KEY :class:`Constraint` instances for this table.")
+    columns = LateBindingProperty(_get_columns, doc="Returns :class:`~fdb.utils.ObjectList` of columns defined for table.\nItems are :class:`TableColumn` objects.")
+    constraints = LateBindingProperty(_get_constraints, doc="Returns :class:`~fdb.utils.ObjectList` of constraints defined for table.\nItems are :class:`Constraint` objects.")
+    indices = LateBindingProperty(_get_indices, doc="Returns :class:`~fdb.utils.ObjectList` of indices defined for table.\nItems are :class:`Index` objects.")
+    triggers = LateBindingProperty(_get_triggers, doc="Returns :class:`~fdb.utils.ObjectList` of triggers defined for table.\nItems are :class:`Trigger` objects.")
+    privileges = LateBindingProperty(_get_privileges, doc=":class:`~fdb.utils.ObjectList` of :class:`Privilege` objects granted to this object.")
     #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitTable(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitTable(self)
-    def get_column(self,name):
+    def get_column(self, name):
         "Return :class:`TableColumn` object with specified name."
-        for col in self.columns:
-            if col.name == name:
-                return col
-        return None
+        return self.columns.get(name)
+        #for col in self.columns:
            #if col.name == name:
                #return col
        #return None
     def isgtt(self):
         "Returns True if table is GLOBAL TEMPORARY table."
         return self.table_type.startswith('GLOBAL_TEMPORARY')
     def ispersistent(self):
         "Returns True if table is persistent one."
-        return self.table_type in ['PERSISTENT','EXTERNAL']
+        return self.table_type in ['PERSISTENT', 'EXTERNAL']
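[Editor's note: the new no_pk/no_unique parameters accepted by
Table._get_create_sql() above let a caller omit the key clauses from CREATE
TABLE, for example to emit them afterwards as separate ALTER TABLE statements.
A hedged sketch, reusing the illustrative connection from the earlier notes:

    t = con.schema.get_table('COUNTRY')
    print(t.get_sql_for('create', no_pk=True))  # CREATE TABLE without the PRIMARY KEY clause
    print(t.primary_key.get_sql_for('create'))  # ALTER TABLE COUNTRY ADD PRIMARY KEY (COUNTRY)
    print(t.get_sql_for('comment'))             # COMMENT ON TABLE COUNTRY IS ...
]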
     def isexternal(self):
         "Returns True if table is external table."
         return bool(self.external_file)
@@ -2690,11 +2756,11 @@
     Supported SQL actions:
     - User views: create, recreate, alter(columns=string_or_list,query=string,check=bool),
-      create_or_alter, drop
-    - System views: none
+      create_or_alter, drop, comment
+    - System views: comment
     """
-    def __init__(self,schema,attributes):
-        super(View,self).__init__(schema,attributes)
+    def __init__(self, schema, attributes):
+        super(View, self).__init__(schema, attributes)
         self._type_code = [1,]
         self.__columns = None
@@ -2705,31 +2771,38 @@
         self._strip_attribute('RDB$SECURITY_CLASS')
         self._strip_attribute('RDB$DEFAULT_CLASS')
+        self._actions = ['comment']
         if not self.issystemobject():
-            self._actions = ['create','recreate','alter','create_or_alter','drop']
+            self._actions.extend(['create', 'recreate', 'alter', 'create_or_alter', 'drop'])
     #--- Protected
-
-    def _get_create_sql(self,**params):
-        self._check_params(params,[])
+    def _get_create_sql(self, **params):
+        self._check_params(params, [])
         return "CREATE VIEW %s (%s)\n AS\n %s" % (self.get_quoted_name(),
-            ','.join([col.get_quoted_name() for col in self.columns]),self.sql)
-    def _get_alter_sql(self,**params):
-        self._check_params(params,['columns','query','check'])
+                                                  ','.join([col.get_quoted_name()
                                                            for col in self.columns]),
                                                  self.sql)
+    def _get_alter_sql(self, **params):
+        self._check_params(params, ['columns', 'query', 'check'])
         columns = params.get('columns')
-        if isinstance(columns,(list,tuple)):
+        if isinstance(columns, (list, tuple)):
             columns = ','.join(columns)
         query = params.get('query')
-        check = params.get('check',False)
+        check = params.get('check', False)
         if query:
             return "ALTER VIEW %s %s\n AS\n %s" % (self.get_quoted_name(),
-                '(%s)' % columns if columns else '',
-                '%s\n WITH CHECK OPTION' % query if check else query)
+                                                   '(%s)' % columns if columns else '',
+                                                   '%s\n WITH CHECK OPTION' % query if check
                                                   else query)
         else:
             raise fdb.ProgrammingError("Missing required parameter: 'query'.")
-    def _get_drop_sql(self,**params):
-        self._check_params(params,[])
+    def _get_drop_sql(self, **params):
+        self._check_params(params, [])
         return 'DROP VIEW %s' % self.get_quoted_name()
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON VIEW %s IS %s' % (self.get_quoted_name(),
                                             'NULL' if self.description is None
                                             else "'%s'" % escape_single_quotes(self.description))
     def _get_name(self):
         return self._attributes['RDB$RELATION_NAME']
     def _get_sql(self):
@@ -2749,12 +2822,11 @@
     def _get_flags(self):
         return self._attributes['RDB$FLAGS']
     def _get_triggers(self):
-        return [t for t in self.schema.triggers
-                if t._attributes['RDB$RELATION_NAME'] == self.name]
+        return self.schema.triggers.filter(lambda t: t._attributes['RDB$RELATION_NAME'] == self.name)
     def _get_columns(self):
         if self.__columns is None:
-            self.__columns = [ViewColumn(self.schema,self,row) for row in
-                              self.schema._select("""select r.RDB$FIELD_NAME, r.RDB$RELATION_NAME,
+            self.__columns = ObjectList((ViewColumn(self.schema, self, row) for row in
                                         self.schema._select("""select r.RDB$FIELD_NAME, r.RDB$RELATION_NAME,
 r.RDB$FIELD_SOURCE, r.RDB$FIELD_POSITION, r.RDB$UPDATE_FLAG, r.RDB$FIELD_ID,
 r.RDB$DESCRIPTION, r.RDB$SYSTEM_FLAG, r.RDB$SECURITY_CLASS, r.RDB$NULL_FLAG,
 r.RDB$DEFAULT_SOURCE, r.RDB$COLLATION_ID, r.RDB$BASE_FIELD,
@@ -2762,48 +2834,33 @@
 from RDB$RELATION_FIELDS r
 left join RDB$VIEW_RELATIONS v on r.RDB$VIEW_CONTEXT = v.RDB$VIEW_CONTEXT
 and v.rdb$view_name = ?
 where r.RDB$RELATION_NAME = ?
-order by RDB$FIELD_POSITION""",(self.name,self.name))]
+order by RDB$FIELD_POSITION""", (self.name, self.name))), ViewColumn, 'item.name')
+            self.__columns.freeze()
         return self.__columns
     def _get_privileges(self):
-        return [p for p in self.schema.privileges
-                if ((p.subject_name == self.name) and
-                    (p.subject_type == 0))] # Views are logged as Tables in RDB$USER_PRIVILEGES
+        return self.schema.privileges.filter(lambda p: ((p.subject_name == self.name) and
                                                        (p.subject_type == 0))) # Views are logged as Tables in RDB$USER_PRIVILEGES
     #--- Properties
-
-    id = LateBindingProperty(_get_id,None,None,"Internal number ID for the view.")
-    sql= LateBindingProperty(_get_sql,None,None,"The query specification.")
-    dbkey_length = LateBindingProperty(_get_dbkey_length,None,None,
-        "Length of the RDB$DB_KEY column in bytes.")
-    format = LateBindingProperty(_get_format,None,None,"Internal format ID for the view.")
-    security_class = LateBindingProperty(_get_security_class,None,None,
-        "Security class that define access limits to the view.")
-    owner_name = LateBindingProperty(_get_owner_name,None,None,"User name of view's creator.")
-    default_class = LateBindingProperty(_get_default_class,None,None,"Default security class.")
-    flags = LateBindingProperty(_get_flags,None,None,"Internal flags.")
-
-    columns = LateBindingProperty(_get_columns,None,None,
-        "Returns list of columns defined for view.\nItems are :class:`ViewColumn` objects.")
-    triggers = LateBindingProperty(_get_triggers,None,None,
-        "Returns list of triggers defined for view.\nItems are :class:`Trigger` objects.")
-    privileges = LateBindingProperty(_get_privileges,None,None,
-        "List of :class:`Privilege` objects granted to this object.")
-
+    id = LateBindingProperty(_get_id, doc="Internal number ID for the view.")
+    sql = LateBindingProperty(_get_sql, doc="The query specification.")
+    dbkey_length = LateBindingProperty(_get_dbkey_length, doc="Length of the RDB$DB_KEY column in bytes.")
+    format = LateBindingProperty(_get_format, doc="Internal format ID for the view.")
+    security_class = LateBindingProperty(_get_security_class, doc="Security class that defines access limits to the view.")
+    owner_name = LateBindingProperty(_get_owner_name, doc="User name of view's creator.")
+    default_class = LateBindingProperty(_get_default_class, doc="Default security class.")
+    flags = LateBindingProperty(_get_flags, doc="Internal flags.")
+    columns = LateBindingProperty(_get_columns, doc="Returns :class:`~fdb.utils.ObjectList` of columns defined for view.\nItems are :class:`ViewColumn` objects.")
+    triggers = LateBindingProperty(_get_triggers, doc="Returns :class:`~fdb.utils.ObjectList` of triggers defined for view.\nItems are :class:`Trigger` objects.")
+    privileges = LateBindingProperty(_get_privileges, doc=":class:`~fdb.utils.ObjectList` of :class:`Privilege` objects granted to this object.")
     #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitView(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitView(self)
-    def get_column(self,name):
+    def get_column(self, name):
         "Return :class:`ViewColumn` object with specified name."
         for col in self.columns:
             if col.name == name:
                 return col
         return None
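[Editor's note: per View._get_alter_sql() above, 'query' is mandatory, 'columns'
may be a string, list or tuple, and check=True wraps the query with
WITH CHECK OPTION. A minimal sketch (view name and query are illustrative):

    v = con.schema.get_view('PHONE_LIST')
    # Roughly: ALTER VIEW PHONE_LIST / AS / SELECT * FROM EMPLOYEE / WITH CHECK OPTION
    print(v.get_sql_for('alter', query='SELECT * FROM EMPLOYEE', check=True))
]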
-    def get_trigger(self,name):
+    def get_trigger(self, name):
         "Return :class:`Trigger` object with specified name."
         for t in self.triggers:
             if t.name == name:
@@ -2818,13 +2875,13 @@
     Supported SQL actions:
-    - User trigger: create, recreate, create_or_alter, drop,
+    - User trigger: create(inactive=bool), recreate, create_or_alter, drop,
       alter(fire_on=string,active=bool,sequence=int,declare=string_or_list,
-      code=string_or_list)
-    - System trigger: none
+      code=string_or_list), comment
+    - System trigger: comment
     """
-    def __init__(self,schema,attributes):
-        super(Trigger,self).__init__(schema,attributes)
+    def __init__(self, schema, attributes):
+        super(Trigger, self).__init__(schema, attributes)
         self._type_code = [2,]
         self._strip_attribute('RDB$TRIGGER_NAME')
@@ -2832,22 +2889,23 @@
         self._strip_attribute('RDB$ENGINE_NAME')
         self._strip_attribute('RDB$ENTRYPOINT')
+        self._actions = ['comment']
         if not self.issystemobject():
-            self._actions = ['create','recreate','alter','create_or_alter','drop']
+            self._actions.extend(['create', 'recreate', 'alter', 'create_or_alter', 'drop'])
     #--- Protected
-
-    def _get_create_sql(self,**params):
-        self._check_params(params,[])
+    def _get_create_sql(self, **params):
+        self._check_params(params, ['inactive'])
+        inactive = params.get('inactive', False)
         result = 'CREATE TRIGGER %s' % self.get_quoted_name()
         if self._attributes['RDB$RELATION_NAME']:
             result += ' FOR %s' % self.relation.get_quoted_name()
-        result += ' %s\n%s POSITION %d\n%s' % ('ACTIVE' if self.isactive() else 'INACTIVE',
+        result += ' %s\n%s POSITION %d\n%s' % ('ACTIVE' if self.isactive() and not inactive else 'INACTIVE',
                                                self.get_type_as_string(),
-                                               self.sequence,self.source)
+                                               self.sequence, self.source)
         return result
-    def _get_alter_sql(self,**params):
-        self._check_params(params,['fire_on','active','sequence','declare','code'])
+    def _get_alter_sql(self, **params):
+        self._check_params(params, ['fire_on', 'active', 'sequence', 'declare', 'code'])
         action = params.get('fire_on')
         active = params.get('active')
         sequence = params.get('sequence')
@@ -2859,8 +2917,7 @@
             header += ' ACTIVE' if active else ' INACTIVE'
         if action is not None:
             dbaction = action.upper().startswith('ON ')
-            if ((dbaction and not self.isdbtrigger())
-                or (not dbaction and self.isdbtrigger())):
+            if (dbaction and not self.isdbtrigger()) or (not dbaction and self.isdbtrigger()):
                 raise fdb.ProgrammingError("Trigger type change is not allowed.")
             header += '\n %s' % action
         if sequence is not None:
@@ -2869,34 +2926,38 @@
         if code is not None:
             if declare is None:
                 d = ''
-            elif isinstance(declare,(list,tuple)):
+            elif isinstance(declare, (list, tuple)):
                 d = ''
                 for x in declare:
                     d += ' %s\n' % x
             else:
                 d = '%s\n' % declare
-            if isinstance(code,(list,tuple)):
+            if isinstance(code, (list, tuple)):
                 c = ''
                 for x in code:
                     c += ' %s\n' % x
            else:
                 c = '%s\n' % code
-            body = '\nAS\n%sBEGIN\n%sEND' % (d,c)
+            body = '\nAS\n%sBEGIN\n%sEND' % (d, c)
         else:
             body = ''
         #
         if not (header or body):
             raise fdb.ProgrammingError("Header or body definition required.")
-        return 'ALTER TRIGGER %s%s%s' % (self.get_quoted_name(),header,body)
-    def _get_drop_sql(self,**params):
-        self._check_params(params,[])
+        return 'ALTER TRIGGER %s%s%s' % (self.get_quoted_name(), header, body)
+    def _get_drop_sql(self, **params):
+        self._check_params(params, [])
         return 'DROP TRIGGER %s' % self.get_quoted_name()
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON TRIGGER %s IS %s' % (self.get_quoted_name(),
                                                'NULL' if self.description is None
                                                else "'%s'" % escape_single_quotes(self.description))
     def _get_action_time(self):
         if self.isddltrigger():
             return (self.trigger_type) & 1
         else:
             return (self.trigger_type + 1) & 1
-    def _get_action_type(self,slot):
+    def _get_action_type(self, slot):
         if self.isddltrigger():
             return (self.trigger_type & ~TRIGGER_TYPE_DDL) >> 1
         else:
@@ -2924,7 +2985,7 @@
         return self._attributes.get('RDB$ENGINE_NAME')
     def _get_entrypoint(self):
         return self._attributes.get('RDB$ENTRYPOINT')
-    def _istype(self,type_code):
+    def _istype(self, type_code):
         atype = self._get_action_type(1)
         if atype == type_code:
             return True
@@ -2937,29 +2998,16 @@
         return False
     #--- Properties
-
-    relation = LateBindingProperty(_get_relation,None,None,
-        ":class:`Table` or :class:`View` that the trigger is for, or None for database triggers")
-    sequence = LateBindingProperty(_get_sequence,None,None,
-        "Sequence (position) of trigger. Zero usually means no sequence defined.")
-    trigger_type = LateBindingProperty(_get_trigger_type,None,None,
-        "Numeric code for trigger type that define what event and when are covered by trigger.")
-    source = LateBindingProperty(_get_source,None,None,"PSQL source code.")
-    flags = LateBindingProperty(_get_flags,None,None,"Internal flags.")
-    valid_blr = LateBindingProperty(_get_valid_blr,None,None,
-        "Trigger BLR invalidation flag. Coul be True/False or None.")
+    relation = LateBindingProperty(_get_relation, doc=":class:`Table` or :class:`View` that the trigger is for, or None for database triggers")
+    sequence = LateBindingProperty(_get_sequence, doc="Sequence (position) of trigger. Zero usually means no sequence defined.")
+    trigger_type = LateBindingProperty(_get_trigger_type, doc="Numeric code for trigger type that defines what event and when are covered by trigger.")
+    source = LateBindingProperty(_get_source, doc="PSQL source code.")
+    flags = LateBindingProperty(_get_flags, doc="Internal flags.")
+    valid_blr = LateBindingProperty(_get_valid_blr, doc="Trigger BLR invalidation flag. Could be True/False or None.")
     # FB 3
-    engine_name = LateBindingProperty(_get_engine_name,None,None,"Engine name.")
-    entrypoint = LateBindingProperty(_get_entrypoint,None,None,"Entrypoint.")
-
+    engine_name = LateBindingProperty(_get_engine_name, doc="Engine name.")
+    entrypoint = LateBindingProperty(_get_entrypoint, doc="Entrypoint.")
     #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitTrigger(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitTrigger(self)
     def isactive(self):
         "Returns True if this trigger is active."
         return self._attributes['RDB$TRIGGER_INACTIVE'] == 0
@@ -3009,10 +3057,10 @@
 class ProcedureParameter(BaseSchemaItem):
     """Represents procedure parameter.
-    Supported SQL actions: none.
+    Supported SQL actions: comment
     """
-    def __init__(self,schema,proc,attributes):
-        super(ProcedureParameter,self).__init__(schema,attributes)
+    def __init__(self, schema, proc, attributes):
+        super(ProcedureParameter, self).__init__(schema, attributes)
         self.__proc = proc
         self._strip_attribute('RDB$PARAMETER_NAME')
@@ -3022,8 +3070,14 @@
         self._strip_attribute('RDB$FIELD_NAME')
         self._strip_attribute('RDB$PACKAGE_NAME')
-    #--- Protected
+        self._actions = ['comment']
+    #--- Protected
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON PARAMETER %s.%s IS %s' % (self.procedure.get_quoted_name(),
+                                                     self.get_quoted_name(),
+                                                     'NULL' if self.description is None
+                                                     else "'%s'" % escape_single_quotes(self.description))
     def _get_name(self):
         return self._attributes['RDB$PARAMETER_NAME']
     def _get_procedure(self):
@@ -3058,7 +3112,7 @@
     def _get_collation(self):
         cid = self._attributes.get('RDB$COLLATION_ID')
         return (None if cid is None
-                else self.schema.get_collation_by_id(self.domain._attributes['RDB$CHARACTER_SET_ID'],cid))
+                else self.schema.get_collation_by_id(self.domain._attributes['RDB$CHARACTER_SET_ID'], cid))
     def _get_mechanism(self):
         return self._attributes.get('RDB$PARAMETER_MECHANISM')
     def _get_column(self):
@@ -3069,32 +3123,20 @@
         return self.schema.get_package(self._attributes.get('RDB$PACKAGE_NAME'))
     #--- Properties
-
-    procedure = LateBindingProperty(_get_procedure,None,None,"Name of the stored procedure.")
-    sequence = LateBindingProperty(_get_sequence,None,None,"Sequence (position) of parameter.")
-    domain = LateBindingProperty(_get_domain,None,None,":class:`Domain` for this parameter.")
-    datatype = LateBindingProperty(_get_datatype,None,None,"Comlete SQL datatype definition.")
-    type_from = LateBindingProperty(_get_type_from,None,None,
-        "Numeric code. See :attr:`Schema.enum_param_type_from`.`")
+    procedure = LateBindingProperty(_get_procedure, doc="Name of the stored procedure.")
+    sequence = LateBindingProperty(_get_sequence, doc="Sequence (position) of parameter.")
+    domain = LateBindingProperty(_get_domain, doc=":class:`Domain` for this parameter.")
+    datatype = LateBindingProperty(_get_datatype, doc="Complete SQL datatype definition.")
+    type_from = LateBindingProperty(_get_type_from, doc="Numeric code. See :attr:`Schema.enum_param_type_from`.")
     # FB 2.1
-    default = LateBindingProperty(_get_default,None,None,"Default value.")
-    collation = LateBindingProperty(_get_collation,None,None,
-        ":class:`collation` for this parameter.")
-    mechanism = LateBindingProperty(_get_mechanism,None,None,"Parameter mechanism code.")
+    default = LateBindingProperty(_get_default, doc="Default value.")
+    collation = LateBindingProperty(_get_collation, doc=":class:`Collation` for this parameter.")
+    mechanism = LateBindingProperty(_get_mechanism, doc="Parameter mechanism code.")
     # FB 2.5
-    column = LateBindingProperty(_get_column,None,None,":class:`TableColumn` for this parameter.")
+    column = LateBindingProperty(_get_column, doc=":class:`TableColumn` for this parameter.")
     # FB 3.0
-    package = LateBindingProperty(_get_package,None,None,
-        "Package this procedure belongs to. \nObject is :class:`Package` instance or None.")
-
+    package = LateBindingProperty(_get_package, doc="Package this procedure belongs to. \nObject is :class:`Package` instance or None.")
     #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitProcedureParameter(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitProcedureParameter(self)
     def get_sql_definition(self):
         "Returns SQL definition for parameter."
         typedef = self.datatype
@@ -3105,8 +3147,8 @@
         elif self.type_from == PROCPAR_TYPE_OF_COLUMN:
             typedef = 'TYPE OF COLUMN %s.%s' % (self.column.table.get_quoted_name(),
                                                 self.column.get_quoted_name())
-        result = '%s %s%s' % (self.get_quoted_name(),typedef,
-            '' if self.isnullable() else ' NOT NULL')
+        result = '%s %s%s' % (self.get_quoted_name(), typedef,
                              '' if self.isnullable() else ' NOT NULL')
         c = self.collation
         if c is not None:
             result += ' COLLATE %s' % c.get_quoted_name()
@@ -3134,14 +3176,14 @@
     - User procedure: create(no_code=bool), recreate(no_code=bool),
       create_or_alter(no_code=bool), drop,
       alter(input=string_or_list,output=string_or_list,declare=string_or_list,
-      code=string_or_list)
-    - System procedure: none
+      code=string_or_list), comment
+    - System procedure: comment
     """
-    def __init__(self,schema,attributes):
-        super(Procedure,self).__init__(schema,attributes)
+    def __init__(self, schema, attributes):
+        super(Procedure, self).__init__(schema, attributes)
         self._type_code = [5,]
-        self.__inputParams = self.__outputParams = None
+        self.__input_params = self.__output_params = None
         self._strip_attribute('RDB$PROCEDURE_NAME')
         self._strip_attribute('RDB$OWNER_NAME')
@@ -3152,13 +3194,13 @@
         self.__ods = schema._con.ods
+        self._actions = ['comment']
         if not self.issystemobject():
-            self._actions = ['create','recreate','alter','create_or_alter','drop']
+            self._actions.extend(['create', 'recreate', 'alter', 'create_or_alter', 'drop'])
     #--- Protected
-
-    def _get_create_sql(self,**params):
-        self._check_params(params,['no_code'])
+    def _get_create_sql(self, **params):
+        self._check_params(params, ['no_code'])
         no_code = params.get('no_code')
         result = 'CREATE PROCEDURE %s' % self.get_quoted_name()
         if self.has_input():
@@ -3183,19 +3225,21 @@
                               '' if p.sequence+1 == self._attributes['RDB$PROCEDURE_OUTPUTS'] else ',')
             result += ')\n'
-        return result+'AS\n'+('BEGIN\nEND' if no_code else self.source)
-    def _get_alter_sql(self,**params):
-        self._check_params(params,['input','output','declare','code'])
+        return result+'AS\n'+(('BEGIN\nEND' if self.proc_type != 1
                               else 'BEGIN\n SUSPEND;\nEND')
                              if no_code else self.source)
+    def _get_alter_sql(self, **params):
+        self._check_params(params, ['input', 'output', 'declare', 'code'])
         inpars = params.get('input')
         outpars = params.get('output')
         declare = params.get('declare')
         code = params.get('code')
-        if code is None:
+        if 'code' not in params:
             raise fdb.ProgrammingError("Missing required parameter: 'code'.")
         #
         header = ''
         if inpars is not None:
-            if isinstance(inpars,(list,tuple)):
+            if isinstance(inpars, (list, tuple)):
                 numpars = len(inpars)
                 if numpars == 1:
                     header = ' (%s)\n' % inpars
@@ -3203,7 +3247,7 @@
                     header = ' (\n'
                     i = 1
                     for p in inpars:
-                        header += ' %s%s\n' % (p,'' if i == numpars else ',')
+                        header += ' %s%s\n' % (p, '' if i == numpars else ',')
                         i += 1
                     header += ')\n'
             else:
@@ -3212,7 +3256,7 @@
         if outpars is not None:
             if not header:
                 header += '\n'
-            if isinstance(outpars,(list,tuple)):
+            if isinstance(outpars, (list, tuple)):
                 numpars = len(outpars)
                 if numpars == 1:
                     header += 'RETURNS (%s)\n' % outpars
@@ -3220,7 +3264,7 @@
                     header += 'RETURNS (\n'
                     i = 1
                     for p in outpars:
-                        header += ' %s%s\n' % (p,'' if i == numpars else ',')
+                        header += ' %s%s\n' % (p, '' if i == numpars else ',')
                         i += 1
                     header += ')\n'
             else:
@@ -3229,35 +3273,39 @@
         if code:
             if declare is None:
                 d = ''
-            elif isinstance(declare,(list,tuple)):
+            elif isinstance(declare, (list, tuple)):
                 d = ''
                 for x in declare:
                     d += ' %s\n' % x
            else:
                 d = '%s\n' % declare
-            if isinstance(code,(list,tuple)):
+            if isinstance(code, (list, tuple)):
                 c = ''
                 for x in code:
                     c += ' %s\n' % x
            else:
                 c = '%s\n' % code
-            body = '%sAS\n%sBEGIN\n%sEND' % ('' if header else '\n',d,c)
+            body = '%sAS\n%sBEGIN\n%sEND' % ('' if header else '\n', d, c)
         else:
             body = '%sAS\nBEGIN\nEND' % ('' if header else '\n')
         #
-        return 'ALTER PROCEDURE %s%s%s' % (self.get_quoted_name(),header,body)
-    def _get_drop_sql(self,**params):
-        self._check_params(params,[])
+        return 'ALTER PROCEDURE %s%s%s' % (self.get_quoted_name(), header, body)
+    def _get_drop_sql(self, **params):
+        self._check_params(params, [])
         return 'DROP PROCEDURE %s' % self.get_quoted_name()
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON PROCEDURE %s IS %s' % (self.get_quoted_name(),
                                                  'NULL' if self.description is None
                                                  else "'%s'" % escape_single_quotes(self.description))
     def __param_columns(self):
-        cols = ['RDB$PARAMETER_NAME','RDB$PROCEDURE_NAME','RDB$PARAMETER_NUMBER',
-                'RDB$PARAMETER_TYPE','RDB$FIELD_SOURCE','RDB$DESCRIPTION',
+        cols = ['RDB$PARAMETER_NAME', 'RDB$PROCEDURE_NAME', 'RDB$PARAMETER_NUMBER',
+                'RDB$PARAMETER_TYPE', 'RDB$FIELD_SOURCE', 'RDB$DESCRIPTION',
                 'RDB$SYSTEM_FLAG']
         if self.__ods >= fdb.ODS_FB_21:
-            cols.extend(['RDB$DEFAULT_SOURCE','RDB$COLLATION_ID','RDB$NULL_FLAG',
+            cols.extend(['RDB$DEFAULT_SOURCE', 'RDB$COLLATION_ID', 'RDB$NULL_FLAG',
                          'RDB$PARAMETER_MECHANISM'])
         if self.__ods >= fdb.ODS_FB_25:
-            cols.extend(['RDB$FIELD_NAME','RDB$RELATION_NAME'])
+            cols.extend(['RDB$FIELD_NAME', 'RDB$RELATION_NAME'])
         if self.__ods >= fdb.ODS_FB_30:
             cols.extend(['RDB$PACKAGE_NAME'])
         return ','.join(cols)
@@ -3272,36 +3320,37 @@
     def _get_owner_name(self):
         return self._attributes['RDB$OWNER_NAME']
     def _get_input_params(self):
-        if self.__inputParams is None:
+        sql = """select %s from rdb$procedure_parameters where rdb$procedure_name = ?
+and rdb$parameter_type = 0 order by rdb$parameter_number"""
+        if self.__input_params is None:
             if self.has_input():
-                self.__inputParams = [ProcedureParameter(self.schema,self,row) for row in
-                                      self.schema._select("""select %s from rdb$procedure_parameters
-where rdb$procedure_name = ?
-and rdb$parameter_type = 0
-order by rdb$parameter_number""" % self.__param_columns(),(self.name,))]
+                self.__input_params = ObjectList((ProcedureParameter(self.schema, self, row) for row in
                                                  self.schema._select(sql % self.__param_columns(), (self.name,))),
                                                 ProcedureParameter, 'item.name')
             else:
-                self.__inputParams = []
-        return self.__inputParams
+                self.__input_params = ObjectList()
+        self.__input_params.freeze()
+        return self.__input_params
     def _get_output_params(self):
-        if self.__outputParams is None:
+        sql = """select %s from rdb$procedure_parameters where rdb$procedure_name = ?
+and rdb$parameter_type = 1 order by rdb$parameter_number"""
+        if self.__output_params is None:
             if self.has_output():
-                self.__outputParams = [ProcedureParameter(self.schema,self,row) for row in
-                                       self.schema._select("""select %s from rdb$procedure_parameters
-where rdb$procedure_name = ?
-and rdb$parameter_type = 1
-order by rdb$parameter_number""" % self.__param_columns(),(self.name,))]
+                self.__output_params = ObjectList((ProcedureParameter(self.schema, self, row) for row in
                                                   self.schema._select(sql % self.__param_columns(), (self.name,))),
                                                  ProcedureParameter, 'item.name')
             else:
-                self.__outputParams = []
-        return self.__outputParams
+                self.__output_params = ObjectList()
+        self.__output_params.freeze()
+        return self.__output_params
     def _get_proc_type(self):
-        return self._attributes.get('RDB$PROCEDURE_TYPE',0)
+        return self._attributes.get('RDB$PROCEDURE_TYPE', 0)
     def _get_valid_blr(self):
         result = self._attributes.get('RDB$VALID_BLR')
         return bool(result) if result is not None else None
     def _get_privileges(self):
-        return [p for p in self.schema.privileges
-                if ((p.subject_name == self.name) and
-                    (p.subject_type in self._type_code))]
+        return self.schema.privileges.filter(lambda p: ((p.subject_name == self.name) and
                                                        (p.subject_type in self._type_code)))
     def _get_engine_name(self):
         return self._attributes.get('RDB$ENGINE_NAME')
     def _get_entrypoint(self):
@@ -3312,40 +3361,23 @@
         return self._attributes.get('RDB$PRIVATE_FLAG')
     #--- Properties
-
-    id = LateBindingProperty(_get_id,None,None,"Internal unique ID number.")
-    source = LateBindingProperty(_get_source,None,None,"PSQL source code.")
-    security_class = LateBindingProperty(_get_security_class,None,None,
-        "Security class that define access limits to the procedure.")
-    owner_name = LateBindingProperty(_get_owner_name,None,None,
-        "User name of procedure's creator.")
-    input_params = LateBindingProperty(_get_input_params,None,None,
-        "List of input parameters.\nInstances are :class:`ProcedureParameter` instances.")
-    output_params = LateBindingProperty(_get_output_params,None,None,
-        "List of output parameters.\nInstances are :class:`ProcedureParameter` instances.")
-    privileges = LateBindingProperty(_get_privileges,None,None,
-        "List of :class:`Privilege` objects granted to this object.")
+    id = LateBindingProperty(_get_id, doc="Internal unique ID number.")
+    source = LateBindingProperty(_get_source, doc="PSQL source code.")
+    security_class = LateBindingProperty(_get_security_class, doc="Security class that defines access limits to the procedure.")
+    owner_name = LateBindingProperty(_get_owner_name, doc="User name of procedure's creator.")
+    input_params = LateBindingProperty(_get_input_params, doc=":class:`~fdb.utils.ObjectList` of input parameters.\nInstances are :class:`ProcedureParameter` instances.")
+    output_params = LateBindingProperty(_get_output_params, doc=":class:`~fdb.utils.ObjectList` of output parameters.\nInstances are :class:`ProcedureParameter` instances.")
+    privileges = LateBindingProperty(_get_privileges, doc=":class:`~fdb.utils.ObjectList` of :class:`Privilege` objects granted to this object.")
     # FB 2.1
-    proc_type = LateBindingProperty(_get_proc_type,None,None,
-        "Procedure type code. See :attr:`fdb.Connection.enum_procedure_types`.")
-    valid_blr = LateBindingProperty(_get_valid_blr,None,None,
-        "Procedure BLR invalidation flag. Coul be True/False or None.")
+    proc_type = LateBindingProperty(_get_proc_type, doc="Procedure type code. See :attr:`~fdb.schema.Schema.enum_procedure_types`.")
+    valid_blr = LateBindingProperty(_get_valid_blr, doc="Procedure BLR invalidation flag. Could be True/False or None.")
     # FB 3.0
-    engine_name = LateBindingProperty(_get_engine_name,None,None,"Engine name.")
-    entrypoint = LateBindingProperty(_get_entrypoint,None,None,"Entrypoint.")
-    package = LateBindingProperty(_get_package,None,None,
-        "Package this procedure belongs to. \nObject is :class:`Package` instance or None.")
-    privacy = LateBindingProperty(_get_privacy,None,None,"Privacy flag.")
-
+    engine_name = LateBindingProperty(_get_engine_name, doc="Engine name.")
+    entrypoint = LateBindingProperty(_get_entrypoint, doc="Entrypoint.")
+    package = LateBindingProperty(_get_package, doc="Package this procedure belongs to. \nObject is :class:`Package` instance or None.")
+    privacy = LateBindingProperty(_get_privacy, doc="Privacy flag.")
     #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitProcedure(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitProcedure(self)
-    def get_param(self,name):
+    def get_param(self, name):
         "Returns :class:`ProcedureParameter` with specified name or None"
         for p in self.output_params:
             if p.name == name:
@@ -3369,28 +3401,32 @@
     Supported SQL actions:
-    - User role: create, drop
-    - System role: none
+    - User role: create, drop, comment
+    - System role: comment
     """
-    def __init__(self,schema,attributes):
-        super(Role,self).__init__(schema,attributes)
+    def __init__(self, schema, attributes):
+        super(Role, self).__init__(schema, attributes)
         self._type_code = [13,]
         self._strip_attribute('RDB$ROLE_NAME')
         self._strip_attribute('RDB$OWNER_NAME')
         self._strip_attribute('RDB$SECURITY_CLASS')
+        self._actions = ['comment']
         if not self.issystemobject():
-            self._actions = ['create','drop']
+            self._actions.extend(['create', 'drop'])
     #--- Protected
-
-    def _get_create_sql(self,**params):
-        self._check_params(params,[])
+    def _get_create_sql(self, **params):
+        self._check_params(params, [])
         return 'CREATE ROLE %s' % self.get_quoted_name()
-    def _get_drop_sql(self,**params):
-        self._check_params(params,[])
+    def _get_drop_sql(self, **params):
+        self._check_params(params, [])
         return 'DROP ROLE %s' % self.get_quoted_name()
+    def _get_comment_sql(self, **params):
+        return 'COMMENT ON ROLE %s IS %s' % (self.get_quoted_name(),
                                             'NULL' if self.description is None
                                             else "'%s'" % escape_single_quotes(self.description))
     def _get_name(self):
         return self._attributes['RDB$ROLE_NAME']
     def _get_owner_name(self):
@@ -3398,34 +3434,21 @@
     def _get_security_class(self):
         return self._attributes.get('RDB$SECURITY_CLASS')
     def _get_privileges(self):
-        return [p for p in self.schema.privileges
-                if ((p.user_name == self.name) and
-                    (p.user_type in self._type_code))]
+        return self.schema.privileges.filter(lambda p: ((p.user_name == self.name) and
                                                        (p.user_type in self._type_code)))
     #--- Properties
-
-    owner_name = LateBindingProperty(_get_owner_name,None,None,"User name of role owner.")
-    privileges = LateBindingProperty(_get_privileges,None,None,
-        "List of :class:`Privilege` objects granted to this object.")
-    security_class = LateBindingProperty(_get_security_class,None,None,
-        "Security class name or None.")
-
-    #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitRole(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
-        """
-        visitor.visitRole(self)
+    owner_name = LateBindingProperty(_get_owner_name, doc="User name of role owner.")
+    privileges = LateBindingProperty(_get_privileges, doc=":class:`~fdb.utils.ObjectList` of :class:`Privilege` objects granted to this object.")
+    security_class = LateBindingProperty(_get_security_class, doc="Security class name or None.")
 class FunctionArgument(BaseSchemaItem):
     """Represents UDF argument.
     Supported SQL actions: none.
     """
-    def __init__(self,schema,function,attributes):
-        super(FunctionArgument,self).__init__(schema,attributes)
+    def __init__(self, schema, function, attributes):
+        super(FunctionArgument, self).__init__(schema, attributes)
         self._type_code = [15,]
         self.__function = function
@@ -3439,7 +3462,6 @@
         self._strip_attribute('RDB$DESCRIPTION')
     #--- Protected
-
     def _get_name(self):
         return self.argument_name if self.argument_name else (self.function.name+
                                                               '_'+str(self._get_position()))
@@ -3473,11 +3495,11 @@
         # Classic external UDF
         l = []
         precision_known = False
-        if self.field_type in (FBT_SMALLINT,FBT_INTEGER,FBT_BIGINT):
+        if self.field_type in (FBT_SMALLINT, FBT_INTEGER, FBT_BIGINT):
            if self.precision != None:
-                if (self.sub_type > 0) and (self.sub_type < MAX_INTSUBTYPES):
+                if (self.sub_type > 0) and (self.sub_type <= MAX_INTSUBTYPES):
                    l.append('%s(%d, %d)' % \
-                        (INTEGRAL_SUBTYPES[self.sub_type],self.precision,-self.scale))
+                        (INTEGRAL_SUBTYPES[self.sub_type], self.precision, -self.scale))
                    precision_known = True
         if not precision_known:
            if (self.field_type == FBT_SMALLINT) and (self.scale < 0):
@@ -3488,7 +3510,7 @@
                l.append('NUMERIC(15, %d)' % -self.scale)
            else:
                l.append(COLUMN_TYPES[self.field_type])
-        if self.field_type in (FBT_CHAR,FBT_VARCHAR,FBT_CSTRING):
+        if self.field_type in (FBT_CHAR, FBT_VARCHAR, FBT_CSTRING):
            l.append('(%d)' % (self.length if (self.character_length is None)
                               else self.character_length))
         if self.field_type == FBT_BLOB:
@@ -3497,7 +3519,7 @@
                l.append(' SUB_TYPE %s' % BLOB_SUBTYPES[self.sub_type])
            else:
                l.append(' SUB_TYPE %d' % self.sub_type)
-        if self.field_type in (FBT_CHAR,FBT_VARCHAR,FBT_CSTRING,FBT_BLOB):
+        if self.field_type in (FBT_CHAR, FBT_VARCHAR, FBT_CSTRING, FBT_BLOB):
            if self._attributes['RDB$CHARACTER_SET_ID'] is not None and \
               (self.character_set.name != self.schema.default_character_set.name):
                l.append(' CHARACTER SET %s' % self.character_set.name)
@@ -3519,7 +3541,7 @@
     def _get_collation(self):
         cid = self._attributes.get('RDB$COLLATION_ID')
         return (None if cid is None
-                else self.schema.get_collation_by_id(self.domain._attributes['RDB$CHARACTER_SET_ID'],cid))
+                else self.schema.get_collation_by_id(self.domain._attributes['RDB$CHARACTER_SET_ID'], cid))
     def _get_argument_mechanism(self):
         return self._attributes.get('RDB$ARGUMENT_MECHANISM')
     def _get_column(self):
@@ -3541,55 +3563,33 @@
         raise fdb.InternalError("Unknown parameter mechanism code: %d" % m)
     #--- Properties
-
-    function = LateBindingProperty(_get_function,None,None,
-        ":class:`Function` to which this argument belongs.")
-    position = LateBindingProperty(_get_position,None,None,"Argument position.")
-    mechanism = LateBindingProperty(_get_mechanism,None,None,"How argument is passed.")
-    field_type = LateBindingProperty(_get_field_type,None,None,
-        "Number code of the data type defined for the argument.")
-    length = LateBindingProperty(_get_length,None,None,
-        "Length of the argument in bytes.")
-    scale = LateBindingProperty(_get_scale,None,None,
-        "Negative number representing the scale of NUMBER and DECIMAL argument.")
-    precision = LateBindingProperty(_get_precision,None,None,
-        "Indicates the number of digits of precision available to the data type of the argument.")
-    sub_type = LateBindingProperty(_get_sub_type,None,None,"BLOB subtype.")
-    character_length = LateBindingProperty(_get_character_length,None,None,
-        "Length of CHAR and VARCHAR column, in characters (not bytes).")
-    character_set = LateBindingProperty(_get_character_set,None,None,
-        ":class:`CharacterSet` for a character/text BLOB argument, or None.")
-    datatype = LateBindingProperty(_get_datatype,None,None,
-        "Comlete SQL datatype definition.")
+    function = LateBindingProperty(_get_function, doc=":class:`Function` to which this argument belongs.")
+    position = LateBindingProperty(_get_position, doc="Argument position.")
+    mechanism = LateBindingProperty(_get_mechanism, doc="How argument is passed.")
+    field_type = LateBindingProperty(_get_field_type, doc="Number code of the data type defined for the argument.")
+    length = LateBindingProperty(_get_length, doc="Length of the argument in bytes.")
+    scale = LateBindingProperty(_get_scale, doc="Negative number representing the scale of NUMBER and DECIMAL argument.")
+    precision = LateBindingProperty(_get_precision, doc="Indicates the number of digits of precision available to the data type of the argument.")
+    sub_type = LateBindingProperty(_get_sub_type, doc="BLOB subtype.")
+    character_length = LateBindingProperty(_get_character_length, doc="Length of CHAR and VARCHAR column, in characters (not bytes).")
+    character_set = LateBindingProperty(_get_character_set, doc=":class:`CharacterSet` for a character/text BLOB argument, or None.")
+    datatype = LateBindingProperty(_get_datatype, doc="Complete SQL datatype definition.")
     # FB 3.0
-    argument_name = LateBindingProperty(_get_argument_name,None,None,"Argument name.")
-    domain = LateBindingProperty(_get_domain,None,None,":class:`Domain` for this parameter.")
-    default = LateBindingProperty(_get_default,None,None,"Default value.")
-    collation = LateBindingProperty(_get_collation,None,None,
-        ":class:`collation` for this parameter.")
-    argument_mechanism = LateBindingProperty(_get_argument_mechanism,None,None,
-        "Argiment mechanism.")
-    column = LateBindingProperty(_get_column,None,None,":class:`TableColumn` for this parameter.")
-    type_from = LateBindingProperty(_get_type_from,None,None,
-        "Numeric code. See :attr:`Schema.enum_param_type_from`.`")
-    package = LateBindingProperty(_get_package,None,None,
-        "Package this function belongs to. \nObject is :class:`Package` instance or None.")
-
+    argument_name = LateBindingProperty(_get_argument_name, doc="Argument name.")
+    domain = LateBindingProperty(_get_domain, doc=":class:`Domain` for this parameter.")
+    default = LateBindingProperty(_get_default, doc="Default value.")
+    collation = LateBindingProperty(_get_collation, doc=":class:`Collation` for this parameter.")
+    argument_mechanism = LateBindingProperty(_get_argument_mechanism, doc="Argument mechanism.")
+    column = LateBindingProperty(_get_column, doc=":class:`TableColumn` for this parameter.")
+    type_from = LateBindingProperty(_get_type_from, doc="Numeric code. See :attr:`Schema.enum_param_type_from`.")
+    package = LateBindingProperty(_get_package, doc="Package this function belongs to.\nObject is :class:`Package` instance or None.")
     #--- Public
-
-    def accept_visitor(self,visitor):
-        """Visitor Pattern support. Calls `visitFunctionArgument(self)` on parameter object.
-
-        :param visitor: Visitor object of Vistior Pattern.
- """ - visitor.visitFunctionArgument(self) def get_sql_definition(self): "Returns SQL definition for parameter." if self.function.isexternal(): return '%s%s%s' % (self.datatype, ' BY DESCRIPTOR' if self.isbydescriptor() else '', - ' BY VALUE' if self.isbyvalue() and self.isreturning() else '', - ) + ' BY VALUE' if self.isbyvalue() and self.isreturning() else '',) else: typedef = self.datatype if self.type_from == PROCPAR_DOMAIN: @@ -3613,14 +3613,14 @@ return self.mechanism == 0 def isbyreference(self): "Returns True if argument is passed by reference." - return self.mechanism in [1,5] - def isbydescriptor(self,any=False): + return self.mechanism in [1, 5] + def isbydescriptor(self, any=False): """Returns True if argument is passed by descriptor. :param bool any: If True, method returns True if any kind of descriptor is used (including BLOB and ARRAY descriptors). """ - return self.mechanism in [2,3,4] if any else self.mechanism == 2 + return self.mechanism in [2, 3, 4] if any else self.mechanism == 2 def iswithnull(self): "Returns True if argument is passed by reference with NULL support." return self.mechanism == 5 @@ -3646,14 +3646,15 @@ Supported SQL actions: - - External UDF: declare, drop - - PSQL UDF (FB 3, not declared in package): create, recreate, create_or_alter, drop, + - External UDF: declare, drop, comment + - PSQL UDF (FB 3, not declared in package): create(no_code=bool), + recreate(no_code=bool), create_or_alter(no_code=bool), drop, alter(arguments=string_or_list,returns=string,declare=string_or_list, code=string_or_list) - System UDF: none """ - def __init__(self,schema,attributes): - super(Function,self).__init__(schema,attributes) + def __init__(self, schema, attributes): + super(Function, self).__init__(schema, attributes) self._type_code = [15,] self.__arguments = None self.__returns = None @@ -3670,33 +3671,35 @@ if not self.issystemobject(): if self.isexternal(): - self._actions = ['declare','drop'] + self._actions = ['comment', 'declare', 'drop'] else: if self._attributes.get('RDB$PACKAGE_NAME') is None: - self._actions = ['create','recreate','alter','create_or_alter','drop'] - pass + self._actions = ['create', 'recreate', 'alter', 'create_or_alter', 'drop'] #--- Protected - - def _get_declare_sql(self,**params): - self._check_params(params,[]) + def _get_declare_sql(self, **params): + self._check_params(params, []) fdef = 'DECLARE EXTERNAL FUNCTION %s\n' % self.get_quoted_name() for p in self.arguments: fdef += ' %s%s\n' % (p.get_sql_definition(), '' if p.position == len(self.arguments) else ',') if self.has_return(): fdef += 'RETURNS %s%s\n' % ('PARAMETER %d' % self._attributes['RDB$RETURN_ARGUMENT'] - if self.has_return_argument() - else self.returns.get_sql_definition(), - ' FREE_IT' if self.returns.isfreeit() else '') - return "%sENTRY_POINT '%s'\nMODULE_NAME '%s'" % (fdef,self.entrypoint, - self.module_name) - def _get_drop_sql(self,**params): - self._check_params(params,[]) + if self.has_return_argument() + else self.returns.get_sql_definition(), + ' FREE_IT' if self.returns.isfreeit() else '') + return "%sENTRY_POINT '%s'\nMODULE_NAME '%s'" % (fdef, self.entrypoint, self.module_name) + def _get_drop_sql(self, **params): + self._check_params(params, []) return 'DROP%s FUNCTION %s' % (' EXTERNAL' if self.isexternal() else '', self.get_quoted_name()) - def _get_create_sql(self,**params): - self._check_params(params,[]) + def _get_comment_sql(self, **params): + return 'COMMENT ON EXTERNAL FUNCTION %s IS %s' % (self.get_quoted_name(), + 'NULL' if 
self.description is None + else "'%s'" % escape_single_quotes(self.description)) + def _get_create_sql(self, **params): + self._check_params(params, ['no_code']) + no_code = params.get('no_code') result = 'CREATE FUNCTION %s' % self.get_quoted_name() if self.has_arguments(): if len(self.arguments) == 1: @@ -3710,21 +3713,21 @@ else: result += '\n' result += 'RETURNS %s\n' % self.returns.get_sql_definition() - return result+'AS\n'+self.source - def _get_alter_sql(self,**params): - self._check_params(params,['arguments','returns','declare','code']) + return result+'AS\n'+('BEGIN\nEND' if no_code else self.source) + def _get_alter_sql(self, **params): + self._check_params(params, ['arguments', 'returns', 'declare', 'code']) arguments = params.get('arguments') returns = params.get('returns') if returns is None: raise fdb.ProgrammingError("Missing required parameter: 'returns'.") declare = params.get('declare') code = params.get('code') - if code is None: + if 'code' not in params: raise fdb.ProgrammingError("Missing required parameter: 'code'.") # header = '' if arguments is not None: - if isinstance(arguments,(list,tuple)): + if isinstance(arguments, (list, tuple)): numpars = len(arguments) if numpars == 1: header = ' (%s)\n' % arguments @@ -3732,7 +3735,7 @@ header = ' (\n' i = 1 for p in arguments: - header += ' %s%s\n' % (p,'' if i == numpars else ',') + header += ' %s%s\n' % (p, '' if i == numpars else ',') i += 1 header += ')\n' else: @@ -3745,37 +3748,39 @@ if code: if declare is None: d = '' - elif isinstance(declare,(list,tuple)): + elif isinstance(declare, (list, tuple)): d = '' for x in declare: d += ' %s\n' % x else: d = '%s\n' % declare - if isinstance(code,(list,tuple)): + if isinstance(code, (list, tuple)): c = '' for x in code: c += ' %s\n' % x else: c = '%s\n' % code - body = '%sAS\n%sBEGIN\n%sEND' % ('' if header else '\n',d,c) + body = '%sAS\n%sBEGIN\n%sEND' % ('' if header else '\n', d, c) else: body = '%sAS\nBEGIN\nEND' % ('' if header else '\n') # - return 'ALTER FUNCTION %s%s%s' % (self.get_quoted_name(),header,body) - def _load_arguments(self,mock=None): - cols = ['RDB$FUNCTION_NAME','RDB$ARGUMENT_POSITION','RDB$MECHANISM', - 'RDB$FIELD_TYPE','RDB$FIELD_SCALE','RDB$FIELD_LENGTH', - 'RDB$FIELD_SUB_TYPE','RDB$CHARACTER_SET_ID','RDB$FIELD_PRECISION', + return 'ALTER FUNCTION %s%s%s' % (self.get_quoted_name(), header, body) + def _load_arguments(self, mock=None): + cols = ['RDB$FUNCTION_NAME', 'RDB$ARGUMENT_POSITION', 'RDB$MECHANISM', + 'RDB$FIELD_TYPE', 'RDB$FIELD_SCALE', 'RDB$FIELD_LENGTH', + 'RDB$FIELD_SUB_TYPE', 'RDB$CHARACTER_SET_ID', 'RDB$FIELD_PRECISION', 'RDB$CHARACTER_LENGTH'] if self.__ods >= fdb.ODS_FB_30: - cols.extend(['RDB$PACKAGE_NAME','RDB$ARGUMENT_NAME','RDB$FIELD_SOURCE', - 'RDB$DEFAULT_SOURCE','RDB$COLLATION_ID','RDB$NULL_FLAG', - 'RDB$ARGUMENT_MECHANISM','RDB$FIELD_NAME','RDB$RELATION_NAME', - 'RDB$SYSTEM_FLAG','RDB$DESCRIPTION']) - self.__arguments = [FunctionArgument(self.schema,self,row) for row in - (mock if mock else - self.schema._select("""select %s from rdb$function_arguments -where rdb$function_name = ? 
order by rdb$argument_position""" % ','.join(cols),(self.name,)))] + cols.extend(['RDB$PACKAGE_NAME', 'RDB$ARGUMENT_NAME', 'RDB$FIELD_SOURCE', + 'RDB$DEFAULT_SOURCE', 'RDB$COLLATION_ID', 'RDB$NULL_FLAG', + 'RDB$ARGUMENT_MECHANISM', 'RDB$FIELD_NAME', 'RDB$RELATION_NAME', + 'RDB$SYSTEM_FLAG', 'RDB$DESCRIPTION']) + self.__arguments = ObjectList((FunctionArgument(self.schema, self, row) for row in + (mock if mock else + self.schema._select("""select %s from rdb$function_arguments +where rdb$function_name = ? order by rdb$argument_position""" % ','.join(cols), (self.name,)))), + FunctionArgument) + self.__arguments.freeze() rarg = self._attributes['RDB$RETURN_ARGUMENT'] if rarg is not None: for a in self.__arguments: @@ -3794,7 +3799,7 @@ def _get_arguments(self): if self.__arguments is None: self._load_arguments() - return [a for a in self.__arguments if a.position != 0] + return self.__arguments.filter(lambda a: a.position != 0) def _get_engine_mame(self): return self._attributes.get('RDB$ENGINE_NAME') def _get_package(self): @@ -3818,35 +3823,26 @@ return self._attributes.get('RDB$DETERMINISTIC_FLAG') #--- Properties - - module_name = LateBindingProperty(_get_module_name,None,None,"Module name.") - entrypoint = LateBindingProperty(_get_entrypoint,None,None,"Entrypoint in module.") - returns = LateBindingProperty(_get_returns,None,None, - "Returning :class:`FunctionArgument` or None.") - arguments = LateBindingProperty(_get_arguments,None,None, - "List of function arguments. Items are :class:`FunctionArgument` instances.") + module_name = LateBindingProperty(_get_module_name, doc="Module name.") + entrypoint = LateBindingProperty(_get_entrypoint, doc="Entrypoint in module.") + returns = LateBindingProperty(_get_returns, doc="Returning :class:`FunctionArgument` or None.") + arguments = LateBindingProperty(_get_arguments, + doc=":class:`~fdb.utils.ObjectList` of function arguments." + " Items are :class:`FunctionArgument` instances.") # Firebird 3.0 - engine_mame = LateBindingProperty(_get_engine_mame,None,None,"Engine name.") - package = LateBindingProperty(_get_package,None,None, - "Package this function belongs to. \nObject is :class:`Package` instance or None.") - private_flag = LateBindingProperty(_get_private_flag,None,None,"Private flag.") - source = LateBindingProperty(_get_source,None,None,"Function source.") - id = LateBindingProperty(_get_id,None,None,"Function ID.") - valid_blr = LateBindingProperty(_get_valid_blr,None,None,"BLR validity flag.") - security_class = LateBindingProperty(_get_security_class,None,None,"Security class.") - owner_name = LateBindingProperty(_get_owner_name,None,None,"Owner name.") - legacy_flag = LateBindingProperty(_get_legacy_flag,None,None,"Legacy flag.") - deterministic_flag = LateBindingProperty(_get_deterministic_flag,None,None, - "Deterministic flag.") - + engine_mame = LateBindingProperty(_get_engine_mame, doc="Engine name.") + package = LateBindingProperty(_get_package, + doc="Package this function belongs to. 
\nObject is" + " :class:`Package` instance or None.") + private_flag = LateBindingProperty(_get_private_flag, doc="Private flag.") + source = LateBindingProperty(_get_source, doc="Function source.") + id = LateBindingProperty(_get_id, doc="Function ID.") + valid_blr = LateBindingProperty(_get_valid_blr, doc="BLR validity flag.") + security_class = LateBindingProperty(_get_security_class, doc="Security class.") + owner_name = LateBindingProperty(_get_owner_name, doc="Owner name.") + legacy_flag = LateBindingProperty(_get_legacy_flag, doc="Legacy flag.") + deterministic_flag = LateBindingProperty(_get_deterministic_flag, doc="Deterministic flag.") #--- Public - - def accept_visitor(self,visitor): - """Visitor Pattern support. Calls `visitFunction(self)` on parameter object. - - :param visitor: Visitor object of Vistior Pattern. - """ - visitor.visitFunction(self) def isexternal(self): "Returns True if function is external UDF, False for PSQL functions." return True if self.module_name else False @@ -3855,10 +3851,10 @@ return bool(self.arguments) def has_return(self): "Returns True if function returns a value." - return (self.returns is not None) + return self.returns is not None def has_return_argument(self): "Returns True if function returns a value in input argument." - return (self.returns.position != 0 if self.returns is not None else False) + return self.returns.position != 0 if self.returns is not None else False def ispackaged(self): "Returns True if function is defined in package." return bool(self._attributes.get('RDB$PACKAGE_NAME')) @@ -3868,14 +3864,13 @@ Supported SQL actions: create """ - def __init__(self,schema,attributes): - super(DatabaseFile,self).__init__(schema,attributes) + def __init__(self, schema, attributes): + super(DatabaseFile, self).__init__(schema, attributes) self._type_code = [] self._strip_attribute('RDB$FILE_NAME') #--- Protected - def _get_name(self): return 'FILE_%d' % self.sequence def _get_filename(self): @@ -3888,20 +3883,11 @@ return self._attributes['RDB$FILE_LENGTH'] #--- Properties - - filename = LateBindingProperty(_get_filename,None,None,"File name.") - sequence = LateBindingProperty(_get_sequence,None,None,"File sequence number.") - start = LateBindingProperty(_get_start,None,None,"File start page number.") - length = LateBindingProperty(_get_length,None,None,"File length in pages.") - + filename = LateBindingProperty(_get_filename, doc="File name.") + sequence = LateBindingProperty(_get_sequence, doc="File sequence number.") + start = LateBindingProperty(_get_start, doc="File start page number.") + length = LateBindingProperty(_get_length, doc="File length in pages.") #--- Public - - def accept_visitor(self,visitor): - """Visitor Pattern support. Calls `visitDatabaseFile(self)` on parameter object. - - :param visitor: Visitor object of Vistior Pattern. - """ - visitor.visitDatabaseFile(self) def issystemobject(self): "Returns True." 
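For orientation, the reworked Function API above hands back filterable ObjectList collections instead of plain lists. A minimal sketch of iterating them, assuming a bootstrap connection that this diff does not show:

    import fdb

    con = fdb.connect(dsn='employee', user='SYSDBA', password='masterkey')
    for fn in con.schema.functions:
        # fn.arguments is an ObjectList of FunctionArgument instances
        args = ', '.join(a.datatype for a in fn.arguments)
        print('%s(%s)' % (fn.name, args))
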
return True @@ -3915,34 +3901,34 @@ SHADOW_MANUAL = 4 SHADOW_CONDITIONAL = 16 - def __init__(self,schema,attributes): - super(Shadow,self).__init__(schema,attributes) + def __init__(self, schema, attributes): + super(Shadow, self).__init__(schema, attributes) self._type_code = [] self.__files = None - self._actions = ['create','drop'] + self._actions = ['create', 'drop'] - def _get_create_sql(self,**params): - self._check_params(params,[]) - result = 'CREATE SHADOW %d %s%s' % (self.id,'MANUAL' if self.ismanual() - else 'AUTO', - ' CONDITIONAL' if self.isconditional() - else '') + def _get_create_sql(self, **params): + self._check_params(params, []) + result = 'CREATE SHADOW %d %s%s' % (self.id, 'MANUAL' if self.ismanual() + else 'AUTO', + ' CONDITIONAL' if self.isconditional() + else '') if len(self.files) == 1: result += " '%s'" % self.files[0].filename else: f = self.files[0] result += " '%s'%s\n" % (f.filename, - ' LENGTH %d' % f.length if f.length > 0 else '') + ' LENGTH %d' % f.length if f.length > 0 else '') for f in self.files[1:]: result += " FILE '%s'%s%s" % (f.filename, - ' STARTING AT %d' % f.start if f.start > 0 else '', - ' LENGTH %d' % f.length if f.length > 0 else '') + ' STARTING AT %d' % f.start if f.start > 0 else '', + ' LENGTH %d' % f.length if f.length > 0 else '') if f.sequence < len(self.files)-1: result += '\n' return result - def _get_drop_sql(self,**params): - self._check_params(params,['preserve']) + def _get_drop_sql(self, **params): + self._check_params(params, ['preserve']) preserve = params.get('preserve') return 'DROP SHADOW %d%s' % (self.id, ' PRESERVE FILE' if preserve else '') def _get_name(self): @@ -3953,29 +3939,20 @@ return self._attributes['RDB$FILE_FLAGS'] def _get_files(self): if self.__files is None: - self.__files = [DatabaseFile(self,row) for row + self.__files = [DatabaseFile(self, row) for row in self.schema._select(""" select RDB$FILE_NAME, RDB$FILE_SEQUENCE, RDB$FILE_START, RDB$FILE_LENGTH from RDB$FILES where RDB$SHADOW_NUMBER = ? -order by RDB$FILE_SEQUENCE""",(self._attributes['RDB$SHADOW_NUMBER'],))] +order by RDB$FILE_SEQUENCE""", (self._attributes['RDB$SHADOW_NUMBER'],))] return self.__files #--- Properties - - id = LateBindingProperty(_get_id,None,None,"Shadow ID number.") - flags = LateBindingProperty(_get_flags,None,None,"Shadow flags.") - files = LateBindingProperty(_get_files,None,None, - "List of shadow files. Items are :class:`DatabaseFile` instances.") - + id = LateBindingProperty(_get_id, doc="Shadow ID number.") + flags = LateBindingProperty(_get_flags, doc="Shadow flags.") + files = LateBindingProperty(_get_files, + doc="List of shadow files. Items are :class:`DatabaseFile` instances.") #--- Public - - def accept_visitor(self,visitor): - """Visitor Pattern support. Calls `visitShadow(self)` on parameter object. - - :param visitor: Visitor object of Vistior Pattern. - """ - visitor.visitShadow(self) def issystemobject(self): "Returns False." 
return False @@ -3994,38 +3971,38 @@ Supported SQL actions: grant(grantors),revoke(grantors,grant_option) """ - def __init__(self,schema,attributes): - super(Privilege,self).__init__(schema,attributes) + def __init__(self, schema, attributes): + super(Privilege, self).__init__(schema, attributes) self._type_code = [] - self._actions = ['grant','revoke'] + self._actions = ['grant', 'revoke'] self._strip_attribute('RDB$USER') self._strip_attribute('RDB$GRANTOR') self._strip_attribute('RDB$PRIVILEGE') self._strip_attribute('RDB$RELATION_NAME') self._strip_attribute('RDB$FIELD_NAME') - def _get_grant_sql(self,**params): - self._check_params(params,['grantors']) - grantors = params.get('grantors') - privileges = {'S':'SELECT','I':'INSERT','U':'UPDATE','D':'DELETE','R':'REFERENCES'} + def _get_grant_sql(self, **params): + self._check_params(params, ['grantors']) + grantors = params.get('grantors', ['SYSDBA']) + privileges = {'S':'SELECT', 'I':'INSERT', 'U':'UPDATE', 'D':'DELETE', 'R':'REFERENCES'} admin_option = ' WITH GRANT OPTION' if self.has_grant() else '' if self.privilege in privileges: privilege = privileges[self.privilege] if self.field_name is not None: privilege += '(%s)' % self.field_name - privilege += ' ON ' + privilege += ' ON ' elif self.privilege == 'X': # procedure privilege = 'EXECUTE ON PROCEDURE ' else: # role membership privilege = '' admin_option = ' WITH ADMIN OPTION' if self.has_grant() else '' user = self.user - if isinstance(user,Procedure): + if isinstance(user, Procedure): utype = 'PROCEDURE ' - elif isinstance(user,Trigger): + elif isinstance(user, Trigger): utype = 'TRIGGER ' - elif isinstance(user,View): + elif isinstance(user, View): utype = 'VIEW ' else: utype = '' @@ -4033,32 +4010,32 @@ granted_by = ' GRANTED BY %s' % self.grantor_name else: granted_by = '' - return 'GRANT %s%s TO %s%s%s%s' % (privilege,self.subject_name,utype, - self.user_name,admin_option,granted_by) - def _get_revoke_sql(self,**params): - self._check_params(params,['grant_option','grantors']) - grantors = params.get('grantors') - option_only = params.get('grant_option',False) + return 'GRANT %s%s TO %s%s%s%s' % (privilege, self.subject_name, utype, + self.user_name, admin_option, granted_by) + def _get_revoke_sql(self, **params): + self._check_params(params, ['grant_option', 'grantors']) + grantors = params.get('grantors', ['SYSDBA']) + option_only = params.get('grant_option', False) if option_only and not self.has_grant(): raise fdb.ProgrammingError("Can't revoke grant option that wasn't granted.") - privileges = {'S':'SELECT','I':'INSERT','U':'UPDATE','D':'DELETE','R':'REFERENCES'} + privileges = {'S':'SELECT', 'I':'INSERT', 'U':'UPDATE', 'D':'DELETE', 'R':'REFERENCES'} admin_option = 'GRANT OPTION FOR ' if self.has_grant() and option_only else '' if self.privilege in privileges: privilege = privileges[self.privilege] if self.field_name is not None: privilege += '(%s)' % self.field_name - privilege += ' ON ' + privilege += ' ON ' elif self.privilege == 'X': # procedure privilege = 'EXECUTE ON PROCEDURE ' else: # role membership privilege = '' admin_option = 'ADMIN OPTION FOR' if self.has_grant() and option_only else '' user = self.user - if isinstance(user,Procedure): + if isinstance(user, Procedure): utype = 'PROCEDURE ' - elif isinstance(user,Trigger): + elif isinstance(user, Trigger): utype = 'TRIGGER ' - elif isinstance(user,View): + elif isinstance(user, View): utype = 'VIEW ' else: utype = '' @@ -4066,9 +4043,8 @@ granted_by = ' GRANTED BY %s' % self.grantor_name else: granted_by = 
'' - return 'REVOKE %s%s%s FROM %s%s%s' % (admin_option,privilege, - self.subject_name,utype, - self.user_name,granted_by) + return 'REVOKE %s%s%s FROM %s%s%s' % (admin_option, privilege, self.subject_name, utype, + self.user_name, granted_by) def _get_user(self): return self.schema._get_item(self._attributes['RDB$USER'], self._attributes['RDB$USER_TYPE']) @@ -4097,29 -4073,21 @@ return self._attributes['RDB$FIELD_NAME'] #--- Properties - - user = LateBindingProperty(_get_user,None,None, - "Grantee. Either :class:`~fdb.services.User`, :class:`Role`, " \ - ":class:`Procedure`, :class:`Trigger` or :class:`View` object.") - grantor = LateBindingProperty(_get_grantor,None,None, - "Grantor :class:`~fdb.services.User` object.") - privilege = LateBindingProperty(_get_privilege,None,None,"Privilege code.") - subject = LateBindingProperty(_get_subject,None,None, - "Priviledge subject. Either :class:`Role`, :class:`Table`, :class:`View` " \ - "or :class:`Procedure` object.") - user_name = LateBindingProperty(_get_user_name,None,None,"User name.") - user_type = LateBindingProperty(_get_user_type,None,None,"User type.") - grantor_name = LateBindingProperty(_get_grantor_name,None,None,"Grantor name.") - subject_name = LateBindingProperty(_get_subject_name,None,None,"Subject name.") - subject_type = LateBindingProperty(_get_subject_type,None,None,"Subject type.") - field_name = LateBindingProperty(_get_field_name,None,None,"Field name.") - - def accept_visitor(self,visitor): - """Visitor Pattern support. Calls `visitPrivilege(self)` on parameter object. - - :param visitor: Visitor object of Vistior Pattern. - """ - visitor.visitPrivilege(self) + user = LateBindingProperty(_get_user, + doc="Grantee. Either :class:`~fdb.services.User`, :class:`Role`," + " :class:`Procedure`, :class:`Trigger` or :class:`View` object.") + grantor = LateBindingProperty(_get_grantor, doc="Grantor :class:`~fdb.services.User` object.") + privilege = LateBindingProperty(_get_privilege, doc="Privilege code.") + subject = LateBindingProperty(_get_subject, + doc="Privilege subject. Either :class:`Role`, :class:`Table`," + " :class:`View` or :class:`Procedure` object.") + user_name = LateBindingProperty(_get_user_name, doc="User name.") + user_type = LateBindingProperty(_get_user_type, doc="User type.") + grantor_name = LateBindingProperty(_get_grantor_name, doc="Grantor name.") + subject_name = LateBindingProperty(_get_subject_name, doc="Subject name.") + subject_type = LateBindingProperty(_get_subject_type, doc="Subject type.") + field_name = LateBindingProperty(_get_field_name, doc="Field name.") + #--- Public def has_grant(self): "Returns True if privilege comes with GRANT OPTION." return bool(self._attributes['RDB$GRANT_OPTION']) @@ -4151,35 +4119,35 @@ class Package(BaseSchemaItem): """Represents PSQL package. 
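As a usage sketch for the GRANT/REVOKE builders above (get_sql_for() is the public entry point on schema items; the connection bootstrap is assumed as elsewhere):

    for priv in con.schema.privileges:
        print(priv.get_sql_for('grant'))
        if priv.has_grant():
            # revoke only the grant/admin option, keeping the privilege itself
            print(priv.get_sql_for('revoke', grant_option=True))
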
- Supported SQL actions: create(body=bool), recreate(body=bool), create_or_alter, - alter(header=string_or_list), drop(body=bool),alter + Supported SQL actions: create(body=bool), recreate(body=bool), create_or_alter(body=bool), + alter(header=string_or_list), drop(body=bool), alter """ - def __init__(self,schema,attributes): - super(Package,self).__init__(schema,attributes) - self._type_code = [18,19] + def __init__(self, schema, attributes): + super(Package, self).__init__(schema, attributes) + self._type_code = [18, 19] - self._actions = ['create','recreate','create_or_alter','alter','drop'] + self._actions = ['create', 'recreate', 'create_or_alter', 'alter', 'drop'] self._strip_attribute('RDB$PACKAGE_NAME') self._strip_attribute('RDB$SECURITY_CLASS') self._strip_attribute('RDB$OWNER_NAME') - def _get_create_sql(self,**params): - self._check_params(params,['body']) + def _get_create_sql(self, **params): + self._check_params(params, ['body']) body = params.get('body') cbody = 'BODY ' if body else '' result = 'CREATE PACKAGE %s%s' % (cbody, self.get_quoted_name()) return result+'\nAS\n'+(self.body if body else self.header) - def _get_alter_sql(self,**params): - self._check_params(params,['header']) + def _get_alter_sql(self, **params): + self._check_params(params, ['header']) header = params.get('header') if not header: hdr = '' else: - hdr = '\n'.join(header) if isinstance(header,fdb.ListType) else header - return 'ALTER PACKAGE %s\nAS\nBEGIN\n%s\nEND' % (self.get_quoted_name(),hdr) - def _get_drop_sql(self,**params): - self._check_params(params,['body']) + hdr = '\n'.join(header) if isinstance(header, fdb.ListType) else header + return 'ALTER PACKAGE %s\nAS\nBEGIN\n%s\nEND' % (self.get_quoted_name(), hdr) + def _get_drop_sql(self, **params): + self._check_params(params, ['body']) body = params.get('body') cbody = 'BODY ' if body else '' return 'DROP PACKAGE %s%s' % (cbody, self.get_quoted_name()) @@ -4194,89 +4162,118 @@ def _get_body(self): return self._attributes['RDB$PACKAGE_BODY_SOURCE'] def _get_functions(self): - return [fn for fn in self.schema.functions - if fn._attributes['RDB$PACKAGE_NAME'] == self.name] + return self.schema.functions.filter(lambda fn: + fn._attributes['RDB$PACKAGE_NAME'] == self.name) def _get_procedures(self): - return [proc for proc in self.schema.procedures - if proc._attributes['RDB$PACKAGE_NAME'] == self.name] + return self.schema.procedures.filter(lambda proc: + proc._attributes['RDB$PACKAGE_NAME'] == self.name) #--- Properties + header = LateBindingProperty(_get_header, doc="Package header source.") + body = LateBindingProperty(_get_body, doc="Package body source.") + security_class = LateBindingProperty(_get_security_class, doc="Security class name or None.") + owner_name = LateBindingProperty(_get_owner_name, doc="User name of package creator.") + functions = LateBindingProperty(_get_functions, + doc=":class:`~fdb.utils.ObjectList` of package functions." + " Items are :class:`Function` instances.") + procedures = LateBindingProperty(_get_procedures, + doc=":class:`~fdb.utils.ObjectList` of package procedures." + " Items are :class:`Procedure` instances.") + #--- Public + def has_valid_body(self): + result = self._attributes.get('RDB$VALID_BODY_FLAG') + return bool(result) if result is not None else None + +class BackupHistory(BaseSchemaItem): + """Represents entry of history for backups performed +using the nBackup utility. 
+ +    Supported SQL actions: None +    """ +    def __init__(self, schema, attributes): +        super(BackupHistory, self).__init__(schema, attributes) +        self._type_code = [] + +        self._strip_attribute('RDB$FILE_NAME') - header = LateBindingProperty(_get_header,None,None,"Package header source.") - body = LateBindingProperty(_get_body,None,None,"Package body source.") - security_class = LateBindingProperty(_get_security_class,None,None, - "Security class name or None.") - owner_name = LateBindingProperty(_get_owner_name,None,None,"User name of package creator.") - functions = LateBindingProperty(_get_functions,None,None, - "List of package functions. Items are :class:`Function` instances.") + #--- Protected + def _get_name(self): + return 'BCKP_%d' % self.sequence + def _get_backup_id(self): + return self._attributes['RDB$BACKUP_ID'] + def _get_filename(self): + return self._attributes['RDB$FILE_NAME'] + def _get_created(self): + return self._attributes['RDB$TIMESTAMP'] + def _get_level(self): + return self._attributes['RDB$BACKUP_LEVEL'] + def _get_scn(self): + return self._attributes['RDB$SCN'] + def _get_guid(self): + return self._attributes['RDB$GUID'] - procedures = LateBindingProperty(_get_procedures,None,None, - "List of package procedures. Items are :class:`Procedure` instances.") + #--- Properties + backup_id = LateBindingProperty(_get_backup_id, doc="The identifier assigned by the engine.") + filename = LateBindingProperty(_get_filename, doc="Full path and file name of backup file.") + created = LateBindingProperty(_get_created, doc="Backup date and time.") + level = LateBindingProperty(_get_level, doc="Backup level.") + scn = LateBindingProperty(_get_scn, doc="System (scan) number.") + guid = LateBindingProperty(_get_guid, doc="Unique identifier.") #--- Public + def issystemobject(self): + "Returns True." + return True - def accept_visitor(self,visitor): - """Visitor Pattern support. Calls `visitProcedure(self)` on parameter object. - :param visitor: Visitor object of Vistior Pattern. - """ - visitor.visitPackage(self) - def has_valid_body(self): - result = self._attributes.get('RDB$VALID_BODY_FLAG') - return bool(result) if result is not None else None +class Filter(BaseSchemaItem): + """Represents user-defined BLOB filter. -class SchemaVisitor(object): - """Helper class for implementation of schema Visitor. + Supported SQL actions: - Implements all `visit*` methods supported by schema classes as calls to - :meth:`default_action`. + - BLOB filter: declare, drop, comment + - System UDF: none """ - def default_action(self,obj): - "Does nothing." 
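The new BackupHistory and Filter items above expose nbackup history entries and BLOB filters through the same property/action machinery as the other schema classes. A minimal sketch, assuming the schema publishes matching collections (the collection names backup_history and filters are inferred, not shown in this hunk):

    for bkp in con.schema.backup_history:
        print(bkp.created, bkp.level, bkp.filename)
    for flt in con.schema.filters:
        print(flt.get_sql_for('declare'))
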
- pass - def visitSchema(self,schema): - self.default_action(schema) - def visitMetadataItem(self,item): - self.default_action(item) - def visitCollation(self,collation): - self.default_action(collation) - def visitCharacterSet(self,character_set): - self.default_action(character_set) - def visitException(self,exception): - self.default_action(exception) - def visitGenerator(self,generator): - self.default_action(generator) - def visitTableColumn(self,column): - self.default_action(column) - def visitIndex(self,index): - self.default_action(index) - def visitViewColumn(self,column): - self.default_action(column) - def visitDomain(self,domain): - self.default_action(domain) - def visitDependency(self,dependency): - self.default_action(dependency) - def visitConstraint(self,constraint): - self.default_action(constraint) - def visitTable(self,table): - self.default_action(table) - def visitView(self,view): - self.default_action(view) - def visitTrigger(self,trigger): - self.default_action(trigger) - def visitProcedureParameter(self,param): - self.default_action(param) - def visitProcedure(self,procedure): - self.default_action(procedure) - def visitRole(self,role): - self.default_action(role) - def visitFunctionArgument(self,arg): - self.default_action(arg) - def visitFunction(self,function): - self.default_action(function) - def visitDatabaseFile(self,dbfile): - self.default_action(dbfile) - def visitShadow(self,shadow): - self.default_action(shadow) - def visitPackage(self,package): - self.default_action(package) + def __init__(self, schema, attributes): + super(Filter, self).__init__(schema, attributes) + self._type_code = [16,] + + self._strip_attribute('RDB$FUNCTION_NAME') + self._strip_attribute('RDB$MODULE_NAME') + self._strip_attribute('RDB$ENTRYPOINT') + + self.__ods = schema._con.ods + + if not self.issystemobject(): + self._actions = ['comment', 'declare', 'drop'] + + #--- Protected + def _get_declare_sql(self, **params): + self._check_params(params, []) + fdef = 'DECLARE FILTER %s\nINPUT_TYPE %d OUTPUT_TYPE %d\n' % (self.get_quoted_name(), + self.input_sub_type, + self.output_sub_type) + return "%sENTRY_POINT '%s' MODULE_NAME '%s'" % (fdef, self.entrypoint, self.module_name) + def _get_drop_sql(self, **params): + self._check_params(params, []) + return 'DROP FILTER %s' % self.get_quoted_name() + def _get_comment_sql(self, **params): + return 'COMMENT ON FILTER %s IS %s' % (self.get_quoted_name(), + 'NULL' if self.description is None + else "'%s'" % escape_single_quotes(self.description)) + def _get_name(self): + return self._attributes['RDB$FUNCTION_NAME'] + def _get_module_name(self): + return self._attributes['RDB$MODULE_NAME'] + def _get_entrypoint(self): + return self._attributes['RDB$ENTRYPOINT'] + def _get_input_sub_type(self): + return self._attributes.get('RDB$INPUT_SUB_TYPE') + def _get_output_sub_type(self): + return self._attributes.get('RDB$OUTPUT_SUB_TYPE') + + #--- Properties + module_name = LateBindingProperty(_get_module_name, doc="The name of the dynamic library or shared object where the code of the BLOB filter is located.") + entrypoint = LateBindingProperty(_get_entrypoint, doc="The exported name of the BLOB filter in the filter library.") + input_sub_type = LateBindingProperty(_get_input_sub_type, doc="The BLOB subtype of the data to be converted by the function.") + output_sub_type = LateBindingProperty(_get_output_sub_type, doc="The BLOB subtype of the converted data.") diff -Nru fdb-1.6.1+dfsg1/fdb/services.py fdb-2.0.0/fdb/services.py --- 
fdb-1.6.1+dfsg1/fdb/services.py 2016-11-24 12:04:49.000000000 +0000 +++ fdb-2.0.0/fdb/services.py 2018-04-26 14:39:03.000000000 +0000 @@ -2,7 +2,7 @@ # # PROGRAM/MODULE: fdb # FILE: services.py -# DESCRIPTION: Python driver for Firebird +# DESCRIPTION: Python driver for Firebird - Firebird services # CREATED: 19.11.2011 # # Software distributed under the License is distributed AS IS, @@ -33,44 +33,40 @@ api = None -# The following SHUT_* constants are to be passed as the `shutdown_mode` -# parameter to Connection.shutdown: +#: The following SHUT_* constants are to be passed as the `shutdown_mode` parameter to Connection.shutdown: SHUT_LEGACY = -1 SHUT_NORMAL = ibase.isc_spb_prp_sm_normal SHUT_MULTI = ibase.isc_spb_prp_sm_multi SHUT_SINGLE = ibase.isc_spb_prp_sm_single SHUT_FULL = ibase.isc_spb_prp_sm_full -# The following SHUT_* constants are to be passed as the `shutdown_method` -# parameter to Connection.shutdown: +#: The following SHUT_* constants are to be passed as the `shutdown_method` parameter to Connection.shutdown: SHUT_FORCE = ibase.isc_spb_prp_shutdown_db SHUT_DENY_NEW_TRANSACTIONS = ibase.isc_spb_prp_deny_new_transactions SHUT_DENY_NEW_ATTACHMENTS = ibase.isc_spb_prp_deny_new_attachments -# The following WRITE_* constants are to be passed as the `mode` parameter -# to Connection.set_write_mode: +#: The following WRITE_* constants are to be passed as the `mode` parameter to Connection.set_write_mode: WRITE_FORCED = ibase.isc_spb_prp_wm_sync WRITE_BUFFERED = ibase.isc_spb_prp_wm_async -# The following ACCESS_* constants are to be passed as the `mode` parameter -# to Connection.set_access_mode: +#: The following ACCESS_* constants are to be passed as the `mode` parameter to Connection.set_access_mode: ACCESS_READ_WRITE = ibase.isc_spb_prp_am_readwrite ACCESS_READ_ONLY = ibase.isc_spb_prp_am_readonly -# The following CAPABILITY_* constants are return values of `get_server_capabilities` +#: The following CAPABILITY_* constants are return values of `get_server_capabilities` CAPABILITY_MULTI_CLIENT = 0x2 CAPABILITY_REMOTE_HOP = 0x4 CAPABILITY_SERVER_CONFIG = 0x200 CAPABILITY_QUOTED_FILENAME = 0x400 CAPABILITY_NO_SERVER_SHUTDOWN = 0x100 -# The following STATS_* constants are options for backup/restore 'stats' parameter. +#: The following STATS_* constants are options for backup/restore 'stats' parameter. STATS_TOTAL_TIME = 'T' STATS_TIME_DELTA = 'D' STATS_PAGE_READS = 'R' STATS_PAGE_WRITES = 'W' -def _checkString(st): +def _check_string(st): if ibase.PYTHON_MAJOR_VER == 3: try: if isinstance(st, str): @@ -82,13 +78,11 @@ else: if not isinstance(st, ibase.mybytes): raise TypeError('String argument to Services API must be' - ' of type %s, not %s.' % (type(ibase.mybytes),type(st)) - ) + ' of type %s, not %s.' % (type(ibase.mybytes), type(st))) except UnicodeEncodeError: raise TypeError("The database engine's Services API only works" - " properly with ASCII string parameters, so str instances that" - " contain non-ASCII characters are disallowed." - ) + " properly with ASCII string parameters, so str instances that" + " contain non-ASCII characters are disallowed.") else: try: if isinstance(st, ibase.UnicodeType): @@ -101,46 +95,43 @@ else: if not isinstance(st, ibase.mybytes): raise TypeError('String argument to Services API must be' - ' of type %s, not %s.' % (type(ibase.mybytes),type(st)) - ) + ' of type %s, not %s.' 
% (type(ibase.mybytes), type(st))) except UnicodeError: raise TypeError("The database engine's Services API only works" - " properly with ASCII string parameters, so str instances that" - " contain non-ASCII characters, and all unicode instances, are" - " disallowed." - ) - + " properly with ASCII string parameters, so str instances that" + " contain non-ASCII characters, and all unicode instances, are" + " disallowed.") def _string2spb(spb, code, st): myslen = len(st) - _numeric2spb(spb, code, myslen, numCType='H') + _numeric2spb(spb, code, myslen, numctype='H') myformat = str(myslen) + 's' # The length, then 's'. spb.append(struct.pack(myformat, st)) -def _numeric2spb(spb, code, num, numCType='I'): +def _numeric2spb(spb, code, num, numctype='I'): # numCType is one of the pack format characters specified by the Python # standard library module 'struct'. _code2spb(spb, code) - (numericFormat, numericBytes) = _renderSizedIntegerForSPB(num, numCType) - spb.append(struct.pack(numericFormat, numericBytes)) + (numeric_format, numeric_bytes) = _render_sized_integer_for_spb(num, numctype) + spb.append(struct.pack(numeric_format, numeric_bytes)) def _code2spb(spb, code): - (myformat, mybytes) = _renderSizedIntegerForSPB(code, 'b') + (myformat, mybytes) = _render_sized_integer_for_spb(code, 'b') spb.append(struct.pack(myformat, mybytes)) def _vax_inverse(i, myformat): # Apply the inverse of _ksrv.isc_vax_integer to a Python integer; return # the raw bytes of the resulting value. - iRaw = struct.pack(myformat, i) - iConv = api.isc_vax_integer(iRaw, len(iRaw)) - iConvRaw = struct.pack(myformat, iConv) - return iConvRaw + iraw = struct.pack(myformat, i) + iconv = api.isc_vax_integer(iraw, len(iraw)) + iconvraw = struct.pack(myformat, iconv) + return iconvraw -def _renderSizedIntegerForSPB(i, myformat): +def _render_sized_integer_for_spb(i, myformat): # In order to prepare the Python integer i for inclusion in a Services # API action request buffer, the byte sequence of i must be reversed, which # will make i unrepresentible as a normal Python integer. @@ -157,15 +148,12 @@ # (iPackFormat, iRawBytes) = _renderSizedIntegerForSPB(12345, 'I') # spbBytes = struct.pack(iPackFormat, iRawBytes) # - destFormat = '%ds' % struct.calcsize(myformat) - destVal = _vax_inverse(i, myformat) - return (destFormat, destVal) + dest_format = '%ds' % struct.calcsize(myformat) + dest_val = _vax_inverse(i, myformat) + return (dest_format, dest_val) -def connect(host='service_mgr', - user=os.environ.get('ISC_USER', 'sysdba'), - password=os.environ.get('ISC_PASSWORD', None) - ): +def connect(host='service_mgr', user=None, password=None): """Establishes a connection to the Services Manager. :param string host: (optional) Host machine specification. Local by default. @@ -180,15 +168,26 @@ By definition, a Services Manager connection is bound to a particular host. Therefore, the database specified as a parameter to methods such as `getStatistics` MUST NOT include the host name of the database server. + + **Hooks:** + + Event `HOOK_SERVICE_ATTACHED`: Executed before :class:`Connection` + instance is returned. Hook must have signature: + hook_func(connection). Any value returned by hook is ignored. 
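A sketch of the service-attach hook documented above; HOOK_SERVICE_ATTACHED is introduced by this release, while registration through fdb.add_hook() is assumed from the accompanying hook framework (only fdb.get_hooks() appears in this diff):

    import fdb
    import fdb.services

    def on_service_attached(svc):
        # runs just before connect() returns the Connection
        print('attached:', svc.get_service_manager_version())

    fdb.add_hook(fdb.HOOK_SERVICE_ATTACHED, on_service_attached)
    svc = fdb.services.connect(password='masterkey')  # user falls back to ISC_USER/SYSDBA
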
+ """ - setattr(sys.modules[__name__],'api',fdb.load_api()) + setattr(sys.modules[__name__], 'api', fdb.load_api()) + if not user: + user = os.environ.get('ISC_USER', 'SYSDBA') + if not password: + password = os.environ.get('ISC_PASSWORD', None) if password is None: raise fdb.ProgrammingError('A password is required to use' ' the Services Manager.') - _checkString(host) - _checkString(user) - _checkString(password) + _check_string(host) + _check_string(user) + _check_string(password) # The database engine's Services API requires that connection strings # conform to one of the following formats: @@ -214,8 +213,10 @@ host += ':' host += 'service_mgr' - return Connection(host, user, password) - + con = Connection(host, user, password) + for hook in fdb.get_hooks(fdb.HOOK_SERVICE_ATTACHED): + hook(con) + return con class Connection(object): """ @@ -254,10 +255,9 @@ # spb_length = 2 + 1 + 1 + len(self.user) + 1 + 1 + len(self.password) spb = fdb.bs([ibase.isc_spb_version, ibase.isc_spb_current_version, ibase.isc_spb_user_name, len(self.user)]) + self.user + \ - fdb.bs([ibase.isc_spb_password, - len(self.password)]) + self.password + fdb.bs([ibase.isc_spb_password, len(self.password)]) + self.password api.isc_service_attach(self._isc_status, len(self.host), self.host, - self._svc_handle, len(spb), spb) + self._svc_handle, len(spb), spb) if fdb.db_api_error(self._isc_status): raise fdb.exception_from_status(fdb.DatabaseError, self._isc_status, @@ -266,14 +266,14 @@ verstr = self.get_server_version() x = verstr.split() if x[0].find('V') > 0: - (x,self.__version) = x[0].split('V') + (x, self.__version) = x[0].split('V') elif x[0].find('T') > 0: - (x,self.__version) = x[0].split('T') + (x, self.__version) = x[0].split('T') else: # Unknown version self.__version = '0.0.0.0' x = self.__version.split('.') - self.__engine_version = float('%s.%s' % (x[0],x[1])) + self.__engine_version = float('%s.%s' % (x[0], x[1])) def __del__(self): self.close() def next(self): @@ -301,14 +301,11 @@ def __read_buffer(self, init=''): request = fdb.bs([ibase.isc_info_svc_to_eof]) spb = ibase.b('') - api.isc_service_query(self._isc_status, self._svc_handle, None, - len(spb), spb, - len(request), request, - ibase.USHRT_MAX, self._result_buffer) + api.isc_service_query(self._isc_status, self._svc_handle, None, len(spb), spb, + len(request), request, ibase.USHRT_MAX, self._result_buffer) if fdb.db_api_error(self._isc_status): - raise fdb.exception_from_status(fdb.DatabaseError, - self._isc_status, - "Services/isc_service_query:") + raise fdb.exception_from_status(fdb.DatabaseError, self._isc_status, + "Services/isc_service_query:") (result, _) = self._extract_string(self._result_buffer, 1) if ord(self._result_buffer[_]) == ibase.isc_info_end: self.__eof = True @@ -326,8 +323,9 @@ self.__read_buffer() if self._line_buffer: return self._line_buffer.pop(0) - self.__fetching = False - return None + else: + self.__fetching = False + return None def __get_version(self): return self.__version def __get_engine_version(self): @@ -346,21 +344,26 @@ return st def _extract_int(self, raw, index): new_index = index + ctypes.sizeof(ctypes.c_ushort) + return (fdb.bytes_to_uint(raw[index:new_index]), new_index) + def _extract_longint(self, raw, index): + new_index = index + ctypes.sizeof(ctypes.c_uint) return (fdb.bytes_to_int(raw[index:new_index]), new_index) def _extract_string(self, raw, index): (size, index) = self._extract_int(raw, index) new_index = index + size ### Todo: verify handling of P version differences, refactor if 
ibase.PYTHON_MAJOR_VER == 3: - return (str(raw[index:new_index], - ibase.charset_map.get(self.charset, self.charset)), new_index) + return (str(raw[index:new_index], ibase.charset_map.get(self.charset, self.charset)), + new_index) else: return (str(raw[index:new_index]), new_index) + def _extract_bytes(self, raw, index): + (size, index) = self._extract_int(raw, index) + new_index = index + size + return (bytes(raw[index:new_index]), new_index) def _Q(self, code, result_type, timeout=-1): if code < 0 or code > ibase.USHRT_MAX: - raise fdb.ProgrammingError("The service query request_buf code" - " must fall between 0 and %d," - " inclusive." % ibase.USHRT_MAX) + raise fdb.ProgrammingError("The service query request_buf code must fall between 0 and %d, inclusive." % ibase.USHRT_MAX) result = None result_size = 1024 request = fdb.bs([code]) @@ -371,18 +374,14 @@ while True: result_buffer = ctypes.create_string_buffer(result_size) api.isc_service_query(self._isc_status, self._svc_handle, None, - len(spb), spb, - len(request), request, - result_size, result_buffer) + len(spb), spb, + len(request), request, + result_size, result_buffer) if fdb.db_api_error(self._isc_status): - raise fdb.exception_from_status(fdb.DatabaseError, - self._isc_status, - "Services/isc_service_query:") + raise fdb.exception_from_status(fdb.DatabaseError, self._isc_status, "Services/isc_service_query:") if ord(result_buffer[0]) == ibase.isc_info_truncated: if result_size == ibase.USHRT_MAX: - raise fdb.InternalError("Database C API constraints maximum" - "result buffer size to %d" - % ibase.USHRT_MAX) + raise fdb.InternalError("Database C API constraints maximum result buffer size to %d" % ibase.USHRT_MAX) else: result_size = result_size * 4 if result_size > ibase.USHRT_MAX: @@ -432,15 +431,10 @@ return self._Q(code, self.QUERY_TYPE_RAW) def _action_thin(self, request_buffer): if len(request_buffer) > ibase.USHRT_MAX: - raise fdb.ProgrammingError("The size of the request buffer" - " must not exceed %d." - % ibase.USHRT_MAX) - api.isc_service_start(self._isc_status, self._svc_handle, None, - len(request_buffer), request_buffer) + raise fdb.ProgrammingError("The size of the request buffer must not exceed %d." % ibase.USHRT_MAX) + api.isc_service_start(self._isc_status, self._svc_handle, None, len(request_buffer), request_buffer) if fdb.db_api_error(self._isc_status): - raise fdb.exception_from_status(fdb.OperationalError, - self._isc_status, - "Unable to perform the requested Service API action:") + raise fdb.exception_from_status(fdb.OperationalError, self._isc_status, "Unable to perform the requested Service API action:") return None def _act(self, request_buffer): return self._action_thin(request_buffer.render()) @@ -454,7 +448,7 @@ # either way. # This enhancement should be a very low priority; the Service Manager # API is not typically used for performance-intensive operations. - resultLines = [] + result_lines = [] while 1: try: line = self._QS(ibase.isc_info_svc_line) @@ -467,8 +461,8 @@ break if not line: break - resultLines.append(line) - return line_separator.join(resultLines) + result_lines.append(line) + return line_separator.join(result_lines) def _repair_action(self, database, partial_req_buf, line_separator='\n'): # Begin constructing the request buffer (incorporate the one passed as # param $partial_req_buf). @@ -484,21 +478,21 @@ # cause the program to block until the Services Manager is finished # with the action). 
return self._collect_unformatted_results(line_separator=line_separator) - def _validate_companion_string_numeric_sequences(self,strings, numbers, + def _validate_companion_string_numeric_sequences(self, strings, numbers, string_caption, number_caption): # The core constraint here is that len(numbers) must equal len(strings) - 1 - stringsCount = len(strings) - numbersCount = len(numbers) + strings_count = len(strings) + numbers_count = len(numbers) - requiredNumbersCount = stringsCount - 1 + required_numbers_count = strings_count - 1 - if numbersCount != requiredNumbersCount: + if numbers_count != required_numbers_count: raise ValueError( 'Since you passed %d %s, you must %s corresponding %s.' - % (stringsCount, string_caption, - ('pass %d' % requiredNumbersCount - if requiredNumbersCount > 0 + % (strings_count, string_caption, + ('pass %d' % required_numbers_count + if required_numbers_count > 0 else 'not pass any'), number_caption) ) @@ -514,10 +508,10 @@ # We know the following call to _checkString will raise an exception, # but calling it anyway allows us to centralize the error message # generation: - _checkString(x) + _check_string(x) ### Todo: verify handling of P version differences, refactor? for el in x: - _checkString(el) + _check_string(el) return x def _property_action(self, database, partial_req_buf): # Begin constructing the request buffer (incorporate the one passed as @@ -533,11 +527,10 @@ # Return the results to the caller synchronously # because it blocks until there's been some resolution of the action. return self._collect_unformatted_results() - def _property_action_with_one_num_code(self, database, code, num, - num_ctype = 'I'): - reqBuf = _ServiceActionRequestBuilder() - reqBuf.add_numeric(code, num, numCType=num_ctype) - return self._property_action(database, reqBuf) + def _property_action_with_one_num_code(self, database, code, num, num_ctype='I'): + req_buf = _ServiceActionRequestBuilder() + req_buf.add_numeric(code, num, numctype=num_ctype) + return self._property_action(database, req_buf) def close(self): """Close the connection now (rather than whenever `__del__` is called). The connection will be unusable from this point forward; an :exc:`Error` @@ -548,7 +541,7 @@ api.isc_service_detach(self._isc_status, self._svc_handle) if fdb.db_api_error(self._isc_status): raise fdb.exception_from_status(fdb.DatabaseError, - self._isc_status, "Services/isc_service_detach:") + self._isc_status, "Services/isc_service_detach:") self._svc_handle = None self.__fetching = False def readline(self): @@ -564,7 +557,7 @@ """Get list of remaining output lines from last service query. :returns list: Service output. - :raises ProgrammingError: When service is not in :attr:`fetching` mode. + :raises `~fdb.ProgrammingError`: When service is not in :attr:`fetching` mode. """ return [line for line in self] def isrunning(self): @@ -578,13 +571,11 @@ """ return self._QI(ibase.isc_info_svc_running) > 0 def wait(self): - """Wait until running service completes. + """Wait until running service completes, i.e. stops sending data. """ - if self.isrunning: - x = 1 - while x: - x = self.__fetchline() - self.__fetching = False + while self.isrunning(): + for x in self: + pass def get_service_manager_version(self): """Get Firebird Service Manager version number. 
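The rewritten wait() above simply drains output until isrunning() turns False; callers of an ASYNC action can equally consume the lines themselves, since the Connection is iterable. A sketch, with the database name as a placeholder:

    svc.get_statistics('employee')   # ASYNC: returns immediately
    for line in svc:                 # fetch service output line by line
        print(line)
    # or, to block while discarding the output: svc.wait()
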
@@ -641,7 +632,7 @@ CAPABILITY_QUOTED_FILENAME CAPABILITY_NO_SERVER_SHUTDOWN - Example:: + **Example**:: >>>fdb.services.CAPABILITY_REMOTE_HOP in svc.get_server_capabilities() True @@ -664,7 +655,7 @@ def get_connection_count(self): """Get number of attachments to server. - :returns integer: Directory path. + :returns integer: Number of attachments. """ self.__check_active() return self._get_isc_info_svc_svr_db_info()[0] @@ -703,38 +694,38 @@ :param string database: Database filename or alias. :returns list: Transaction IDs. - :raises InternalError: When can't process the result buffer. + :raises `~fdb.InternalError`: When can't process the result buffer. """ self.__check_active() - _checkString(database) + _check_string(database) - reqBuf = _ServiceActionRequestBuilder() - reqBuf.add_option_mask(ibase.isc_spb_rpr_list_limbo_trans) - raw = self._repair_action(database, reqBuf, line_separator='') + req_buf = _ServiceActionRequestBuilder() + req_buf.add_option_mask(ibase.isc_spb_rpr_list_limbo_trans) + raw = self._repair_action(database, req_buf, line_separator='') raw = ibase.b(raw) - nBytes = len(raw) + nbytes = len(raw) - transIDs = [] + trans_ids = [] i = 0 - while i < nBytes: + while i < nbytes: byte = ibase.ord2(raw[i]) if byte in (ibase.isc_spb_single_tra_id, ibase.isc_spb_multi_tra_id): # The transaction ID is a 32-bit integer that begins # immediately after position i. - transID = struct.unpack('i', raw[i + 1:i + 5])[0] + trans_id = struct.unpack('i', raw[i + 1:i + 5])[0] i += 5 # Advance past the marker byte and the 32-bit integer. - transIDs.append(transID) + trans_ids.append(trans_id) else: raise fdb.InternalError('Unable to process buffer contents' - ' beginning at position %d.' % i) - return transIDs + ' beginning at position %d.' % i) + return trans_ids def _resolve_limbo_transaction(self, resolution, database, transaction_id): - _checkString(database) + _check_string(database) - reqBuf = _ServiceActionRequestBuilder() - reqBuf.add_numeric(resolution, transaction_id) - self._repair_action(database, reqBuf) + req_buf = _ServiceActionRequestBuilder() + req_buf.add_numeric(resolution, transaction_id) + self._repair_action(database, req_buf) def commit_limbo_transaction(self, database, transaction_id): """Resolve limbo transaction with commit. @@ -753,17 +744,10 @@ self.__check_active() self._resolve_limbo_transaction(ibase.isc_spb_rpr_rollback_trans, database, transaction_id) - def get_statistics(self, database, - show_only_db_log_pages=0, - show_only_db_header_pages=0, - show_user_data_pages=1, - show_user_index_pages=1, - # 2004.06.06: False by default b/c gstat behaves that way: - show_system_tables_and_indexes=0, - show_record_versions=0, - callback=None, - tables=None - ): + def get_statistics(self, database, show_only_db_log_pages=0, + show_only_db_header_pages=0, show_user_data_pages=1, + show_user_index_pages=1, show_system_tables_and_indexes=0, + show_record_versions=0, callback=None, tables=None): """Request database statisctics. **(ASYNC service)** :param string database: Database specification. @@ -790,38 +774,38 @@ fail with exception. 
""" self.__check_active() - _checkString(database) + _check_string(database) - reqBuf = _ServiceActionRequestBuilder(ibase.isc_action_svc_db_stats) + req_buf = _ServiceActionRequestBuilder(ibase.isc_action_svc_db_stats) # Request for header negates all other options if show_only_db_header_pages: show_only_db_log_pages = show_user_data_pages = 0 - show_user_index_pages = show_system_tables_and_indexes =0 + show_user_index_pages = show_system_tables_and_indexes = 0 show_record_versions = 0 - optionMask = 0 + option_mask = 0 if show_user_data_pages: - optionMask |= ibase.isc_spb_sts_data_pages + option_mask |= ibase.isc_spb_sts_data_pages if show_only_db_log_pages: - optionMask |= ibase.isc_spb_sts_db_log + option_mask |= ibase.isc_spb_sts_db_log if show_only_db_header_pages: - optionMask |= ibase.isc_spb_sts_hdr_pages + option_mask |= ibase.isc_spb_sts_hdr_pages if show_user_index_pages: - optionMask |= ibase.isc_spb_sts_idx_pages + option_mask |= ibase.isc_spb_sts_idx_pages if show_system_tables_and_indexes: - optionMask |= ibase.isc_spb_sts_sys_relations + option_mask |= ibase.isc_spb_sts_sys_relations if show_record_versions: - optionMask |= ibase.isc_spb_sts_record_versions + option_mask |= ibase.isc_spb_sts_record_versions - reqBuf.add_database_name(database) - reqBuf.add_option_mask(optionMask) + req_buf.add_database_name(database) + req_buf.add_option_mask(option_mask) if tables is not None: - if isinstance(tables, types.StringTypes): + if isinstance(tables, ibase.StringTypes): tables = (tables,) cmdline = ['-t'] cmdline.extend(tables) - reqBuf.add_string(ibase.isc_spb_command_line, ' '.join(cmdline)) - self._act(reqBuf) + req_buf.add_string(ibase.isc_spb_command_line, ' '.join(cmdline)) + self._act(req_buf) self.__fetching = True self.__eof = False if callback: @@ -839,7 +823,7 @@ metadata_only=0, collect_garbage=1, transportable=1, - convert_external_tables_to_internal=0, + convert_external_tables=0, compressed=1, no_db_triggers=0, callback=None, @@ -856,7 +840,7 @@ :param integer metadata_only: `1` to create only metadata backup. :param integer collect_garbage: `0` to skip garbage collection. :param integer transportable: `0` to do not create transportable backup. - :param integer convert_external_tables_to_internal: `1` to convert + :param integer convert_external_tables: `1` to convert external table to internal ones. :param integer compressed: `0` to create uncompressed backup. :param integer no_db_triggers: `1` to disable database triggers temporarily. @@ -876,84 +860,66 @@ """ self.__check_active() # Begin parameter validation section. 
- _checkString(source_database) + _check_string(source_database) dest_filenames = self._require_str_or_tuple_of_str(dest_filenames) - destFilenamesCount = len(dest_filenames) + dest_filenames_count = len(dest_filenames) # 2004.07.17: YYY: Temporary warning: # Current (1.5.1) versions of the database engine appear to hang the # Services API request when it contains more than 11 destFilenames - if destFilenamesCount > 11: - warnings.warn( - 'Current versions of the database engine appear to hang when' - ' passed a request to generate a backup with more than 11' - ' constituents.', - RuntimeWarning - ) + if dest_filenames_count > 11: + warnings.warn('Current versions of the database engine appear to hang when' + ' passed a request to generate a backup with more than 11' + ' constituents.', RuntimeWarning) - if destFilenamesCount > 9999: + if dest_filenames_count > 9999: raise fdb.ProgrammingError("The database engine cannot output a" - " single source database to more than 9999 backup files." - ) - self._validate_companion_string_numeric_sequences( - dest_filenames, dest_file_sizes, - 'destination filenames', 'destination file sizes' - ) - - if len(self._exclude_elements_of_types(dest_file_sizes, - (int, ibase.mylong))) > 0: - raise TypeError("Every element of destFileSizes must be an int" - " or long." - ) - destFileSizesCount = len(dest_file_sizes) + " single source database to more than 9999 backup files.") + self._validate_companion_string_numeric_sequences(dest_filenames, dest_file_sizes, + 'destination filenames', 'destination file sizes') + + if len(self._exclude_elements_of_types(dest_file_sizes, (int, ibase.mylong))) > 0: + raise TypeError("Every element of dest_file_sizes must be an int or long.") + dest_file_sizes_count = len(dest_file_sizes) # The following should have already been checked by # _validateCompanionStringNumericSequences. - assert destFileSizesCount == destFilenamesCount - 1 + assert dest_file_sizes_count == dest_filenames_count - 1 # End parameter validation section. # Begin option bitmask setup section. - optionMask = 0 + option_mask = 0 if ignore_checksums: - optionMask |= ibase.isc_spb_bkp_ignore_checksums + option_mask |= ibase.isc_spb_bkp_ignore_checksums if ignore_limbo_transactions: - optionMask |= ibase.isc_spb_bkp_ignore_limbo + option_mask |= ibase.isc_spb_bkp_ignore_limbo if metadata_only: - optionMask |= ibase.isc_spb_bkp_metadata_only + option_mask |= ibase.isc_spb_bkp_metadata_only if not collect_garbage: - optionMask |= ibase.isc_spb_bkp_no_garbage_collect + option_mask |= ibase.isc_spb_bkp_no_garbage_collect if not transportable: - optionMask |= ibase.isc_spb_bkp_non_transportable - if convert_external_tables_to_internal: - optionMask |= ibase.isc_spb_bkp_convert + option_mask |= ibase.isc_spb_bkp_non_transportable + if convert_external_tables: + option_mask |= ibase.isc_spb_bkp_convert if not compressed: - optionMask |= ibase.isc_spb_bkp_expand + option_mask |= ibase.isc_spb_bkp_expand if no_db_triggers: - optionMask |= ibase.isc_spb_bkp_no_triggers + option_mask |= ibase.isc_spb_bkp_no_triggers # End option bitmask setup section. - # Construct the request buffer. 
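The companion-sequence rule validated above means N destination files need exactly N-1 size limits; the last file takes whatever remains. A usage sketch with hypothetical filenames and credentials:

    import fdb.services

    svc = fdb.services.connect(host='localhost', user='sysdba', password='masterkey')
    try:
        # Three destination files, two size limits (last file is open-ended).
        svc.backup('employee',
                   ('emp.fbk1', 'emp.fbk2', 'emp.fbk3'),
                   dest_file_sizes=(65536, 65536),
                   metadata_only=0, collect_garbage=1)
        svc.wait()     # backup is ASYNC; block until verbose output ends
    finally:
        svc.close()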
request = _ServiceActionRequestBuilder(ibase.isc_action_svc_backup) - # Source database filename: request.add_database_name(source_database) - # Backup filenames and sizes: - request.add_string_numeric_pairs_sequence( - ibase.isc_spb_bkp_file, dest_filenames, - ibase.isc_spb_bkp_length, dest_file_sizes - ) - + request.add_string_numeric_pairs_sequence(ibase.isc_spb_bkp_file, dest_filenames, + ibase.isc_spb_bkp_length, dest_file_sizes) # Options bitmask: - request.add_numeric(ibase.isc_spb_options, optionMask) - + request.add_numeric(ibase.isc_spb_options, option_mask) # Tell the service to make its output available to us. request.add_code(ibase.isc_spb_verbose) - # handle request for run-time statistics if stats: request.add_string(ibase.isc_spb_bkp_stat, ''.join(stats)) - # Done constructing the request buffer. self._act(request) self.__fetching = True @@ -961,6 +927,85 @@ if callback: for line in self: callback(line) + def local_backup(self, + source_database, + backup_stream, + # Backup operation optionMask: + ignore_checksums=0, + ignore_limbo_transactions=0, + metadata_only=0, + collect_garbage=1, + transportable=1, + convert_external_tables=0, + compressed=1, + no_db_triggers=0): + """Request logical (GBAK) database backup into local byte stream. **(SYNC service)** + + :param string source_database: Source database specification. + :param backup_stream: Backup stream. + :param integer ignore_checksums: `1` to ignore checksums. + :param integer ignore_limbo_transactions: `1` to ignore limbo transactions. + :param integer metadata_only: `1` to create only metadata backup. + :param integer collect_garbage: `0` to skip garbage collection. + :param integer transportable: `0` to do not create transportable backup. + :param integer convert_external_tables: `1` to convert + external table to internal ones. + :param integer compressed: `0` to create uncompressed backup. + :param integer no_db_triggers: `1` to disable database triggers temporarily. + """ + self.__check_active() + # Begin parameter validation section. + _check_string(source_database) + + # Begin option bitmask setup section. + option_mask = 0 + if ignore_checksums: + option_mask |= ibase.isc_spb_bkp_ignore_checksums + if ignore_limbo_transactions: + option_mask |= ibase.isc_spb_bkp_ignore_limbo + if metadata_only: + option_mask |= ibase.isc_spb_bkp_metadata_only + if not collect_garbage: + option_mask |= ibase.isc_spb_bkp_no_garbage_collect + if not transportable: + option_mask |= ibase.isc_spb_bkp_non_transportable + if convert_external_tables: + option_mask |= ibase.isc_spb_bkp_convert + if not compressed: + option_mask |= ibase.isc_spb_bkp_expand + if no_db_triggers: + option_mask |= ibase.isc_spb_bkp_no_triggers + # End option bitmask setup section. + + # Construct the request buffer. + request = _ServiceActionRequestBuilder(ibase.isc_action_svc_backup) + + # Source database filename: + request.add_database_name(source_database) + + # Backup file transported via stdout: + request.add_string(ibase.isc_spb_bkp_file, 'stdout') + + # Options bitmask: + request.add_numeric(ibase.isc_spb_options, option_mask) + + # Done constructing the request buffer. 
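The new local_backup() routes the backup through the service's 'stdout' channel into any writable byte stream, so no file needs to exist on the server side of the backup. A usage sketch (hypothetical credentials and paths):

    import io
    import fdb.services

    svc = fdb.services.connect(host='localhost', user='sysdba', password='masterkey')
    try:
        backup = io.BytesIO()                # any object with write() works
        svc.local_backup('employee', backup)
        with open('employee.fbk', 'wb') as f:
            f.write(backup.getvalue())
    finally:
        svc.close()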
+ self._act(request) + eof = False + request = fdb.bs([ibase.isc_info_svc_to_eof]) + spb = ibase.b('') + while not eof: + api.isc_service_query(self._isc_status, self._svc_handle, None, + len(spb), spb, + len(request), request, + ibase.USHRT_MAX, self._result_buffer) + if fdb.db_api_error(self._isc_status): + raise fdb.exception_from_status(fdb.DatabaseError, self._isc_status, + "Services/isc_service_query:") + (result, _) = self._extract_bytes(self._result_buffer, 1) + if ord(self._result_buffer[_]) == ibase.isc_info_end: + eof = True + backup_stream.write(result) def restore(self, source_filenames, dest_filenames, dest_file_pages=(), @@ -1026,68 +1071,54 @@ # End parameter validation section. # Begin option bitmask setup section. - optionMask = 0 + option_mask = 0 if replace: - optionMask |= ibase.isc_spb_res_replace + option_mask |= ibase.isc_spb_res_replace else: - optionMask |= ibase.isc_spb_res_create + option_mask |= ibase.isc_spb_res_create if deactivate_indexes: - optionMask |= ibase.isc_spb_res_deactivate_idx + option_mask |= ibase.isc_spb_res_deactivate_idx if do_not_restore_shadows: - optionMask |= ibase.isc_spb_res_no_shadow + option_mask |= ibase.isc_spb_res_no_shadow if do_not_enforce_constraints: - optionMask |= ibase.isc_spb_res_no_validity + option_mask |= ibase.isc_spb_res_no_validity if commit_after_each_table: - optionMask |= ibase.isc_spb_res_one_at_a_time + option_mask |= ibase.isc_spb_res_one_at_a_time if use_all_page_space: - optionMask |= ibase.isc_spb_res_use_all_space + option_mask |= ibase.isc_spb_res_use_all_space if no_db_triggers: - optionMask |= ibase.isc_spb_bkp_no_triggers + option_mask |= ibase.isc_spb_bkp_no_triggers if metadata_only: - optionMask |= ibase.isc_spb_bkp_metadata_only + option_mask |= ibase.isc_spb_bkp_metadata_only # End option bitmask setup section. - # Construct the request buffer. request = _ServiceActionRequestBuilder(ibase.isc_action_svc_restore) - # Backup filenames: request.add_string_sequence(ibase.isc_spb_bkp_file, source_filenames) - # Database filenames: - request.add_string_numeric_pairs_sequence( - ibase.isc_spb_dbname, dest_filenames, - ibase.isc_spb_res_length, dest_file_pages - ) - + request.add_string_numeric_pairs_sequence(ibase.isc_spb_dbname, dest_filenames, + ibase.isc_spb_res_length, dest_file_pages) # Page size of the restored database: if page_size: request.add_numeric(ibase.isc_spb_res_page_size, page_size) - # cacheBuffers is the number of default cache buffers to configure for # attachments to the restored database: if cache_buffers: request.add_numeric(ibase.isc_spb_res_buffers, cache_buffers) - # accessModeReadOnly controls whether the restored database is # "mounted" in read only or read-write mode: if access_mode_read_only: - accessMode = ibase.isc_spb_prp_am_readonly + access_mode = ibase.isc_spb_prp_am_readonly else: - accessMode = ibase.isc_spb_prp_am_readwrite - request.add_numeric(ibase.isc_spb_res_access_mode, accessMode, - numCType='B' - ) - + access_mode = ibase.isc_spb_prp_am_readwrite + request.add_numeric(ibase.isc_spb_res_access_mode, access_mode, numctype='B') # Options bitmask: - request.add_numeric(ibase.isc_spb_options, optionMask) - + request.add_numeric(ibase.isc_spb_options, option_mask) # Tell the service to make its output available to us. request.add_code(ibase.isc_spb_verbose) - # handle request for run-time statistics if stats: request.add_string(ibase.isc_spb_res_stat, ''.join(stats)) - # Done constructing the request buffer. 
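A restore() usage sketch matching the options above; with replace=0 the isc_spb_res_create flag is set, so the call fails rather than overwrite an existing database (paths and credentials hypothetical):

    import fdb.services

    svc = fdb.services.connect(host='localhost', user='sysdba', password='masterkey')
    try:
        svc.restore('employee.fbk', '/data/employee_copy.fdb',
                    replace=0,            # isc_spb_res_create: fail if DB exists
                    page_size=8192)
        svc.wait()                        # restore is ASYNC, like backup
    finally:
        svc.close()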
self._act(request) self.__fetching = True @@ -1095,6 +1126,146 @@ if callback: for line in self: callback(line) + def local_restore(self, + backup_stream, + dest_filenames, dest_file_pages=(), + page_size=None, + cache_buffers=None, + access_mode_read_only=0, + replace=0, + deactivate_indexes=0, + do_not_restore_shadows=0, + do_not_enforce_constraints=0, + commit_after_each_table=0, + use_all_page_space=0, + no_db_triggers=0, + metadata_only=0): + """Request database restore from logical (GBAK) backup stored in local byte stream. **(SYNC service)** + + :param backup_stream: Backup stream. + :param dest_filenames: Database file(s) specification. + :type dest_filenames: string or tuple of strings + :param dest_file_pages: (optional) specification of database file max. + # of pages. + :type dest_file_pages: tuple of integers + :param integer page_size: (optional) Page size. + :param integer cache_buffers: (optional) Size of page-cache for this + database. + :param integer access_mode_read_only: `1` to create R/O database. + :param integer replace: `1` to replace existing database. + :param integer deactivate_indexes: `1` to do not activate indices. + :param integer do_not_restore_shadows: `1` to do not restore shadows. + :param integer do_not_enforce_constraints: `1` to do not enforce + constraints during restore. + :param integer commit_after_each_table: `1` to commit after each table + is restored. + :param integer use_all_page_space: `1` to use all space on data pages. + :param integer no_db_triggers: `1` to disable database triggers temporarily. + :param integer metadata_only: `1` to restore only database metadata. + """ + self.__check_active() + # Begin parameter validation section. + dest_filenames = self._require_str_or_tuple_of_str(dest_filenames) + + self._validate_companion_string_numeric_sequences( + dest_filenames, dest_file_pages, + 'destination filenames', 'destination file page counts' + ) + # End parameter validation section. + + # Begin option bitmask setup section. + option_mask = 0 + if replace: + option_mask |= ibase.isc_spb_res_replace + else: + option_mask |= ibase.isc_spb_res_create + if deactivate_indexes: + option_mask |= ibase.isc_spb_res_deactivate_idx + if do_not_restore_shadows: + option_mask |= ibase.isc_spb_res_no_shadow + if do_not_enforce_constraints: + option_mask |= ibase.isc_spb_res_no_validity + if commit_after_each_table: + option_mask |= ibase.isc_spb_res_one_at_a_time + if use_all_page_space: + option_mask |= ibase.isc_spb_res_use_all_space + if no_db_triggers: + option_mask |= ibase.isc_spb_bkp_no_triggers + if metadata_only: + option_mask |= ibase.isc_spb_bkp_metadata_only + # End option bitmask setup section. + # Construct the request buffer. 
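local_restore(), the counterpart of local_backup(), feeds the backup through the service's 'stdin' channel; the streaming loop below measures the stream with tell()/seek(), so it must be seekable. Usage sketch (hypothetical paths and credentials):

    import fdb.services

    svc = fdb.services.connect(host='localhost', user='sysdba', password='masterkey')
    try:
        # The stream must be seekable: local_restore() uses tell()/seek()
        # to determine how many bytes remain to be sent to the server.
        with open('employee.fbk', 'rb') as backup:
            svc.local_restore(backup, '/data/employee_copy.fdb', replace=0)
    finally:
        svc.close()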
+ request = _ServiceActionRequestBuilder(ibase.isc_action_svc_restore) + # Backup stream: + request.add_string(ibase.isc_spb_bkp_file, 'stdin') + # Database filenames: + request.add_string_numeric_pairs_sequence(ibase.isc_spb_dbname, dest_filenames, + ibase.isc_spb_res_length, dest_file_pages) + # Page size of the restored database: + if page_size: + request.add_numeric(ibase.isc_spb_res_page_size, page_size) + # cacheBuffers is the number of default cache buffers to configure for + # attachments to the restored database: + if cache_buffers: + request.add_numeric(ibase.isc_spb_res_buffers, cache_buffers) + # accessModeReadOnly controls whether the restored database is + # "mounted" in read only or read-write mode: + if access_mode_read_only: + access_mode = ibase.isc_spb_prp_am_readonly + else: + access_mode = ibase.isc_spb_prp_am_readwrite + request.add_numeric(ibase.isc_spb_res_access_mode, access_mode, numctype='B') + # Options bitmask: + request.add_numeric(ibase.isc_spb_options, option_mask) + # Done constructing the request buffer. + self._act(request) + + request_length = 0 + stop = False + pos = backup_stream.tell() + backup_stream.seek(0, 2) + bytes_available = backup_stream.tell() - pos + backup_stream.seek(pos) + spb = ctypes.create_string_buffer(16) + spb[0] = ibase.int2byte(ibase.isc_info_svc_timeout) + spb[1:3] = fdb.uint_to_bytes(4, 2) + spb[3:7] = fdb.uint_to_bytes(1, 4) + spb[7] = ibase.int2byte(ibase.isc_info_end) + wait = True + while not stop: + if request_length > 0: + request_length = min([request_length, 65500]) + raw = backup_stream.read(request_length) + if len(spb) < request_length+4: + spb = ctypes.create_string_buffer(request_length+4) + spb[0] = ibase.int2byte(ibase.isc_info_svc_line) + spb[1:3] = fdb.uint_to_bytes(len(raw), 2) + spb[3:3+len(raw)] = raw + spb[3+len(raw)] = ibase.int2byte(ibase.isc_info_end) + bytes_available -= len(raw) + req = fdb.bs([ibase.isc_info_svc_stdin, ibase.isc_info_svc_line]) + api.isc_service_query(self._isc_status, self._svc_handle, None, + len(spb), spb, + len(req), req, + ibase.USHRT_MAX, self._result_buffer) + if fdb.db_api_error(self._isc_status): + raise fdb.exception_from_status(fdb.DatabaseError, self._isc_status, + "Services/isc_service_query:") + i = 0 + request_length = 0 + while self._result_buffer[i] != ibase.int2byte(ibase.isc_info_end): + code = ibase.ord2(self._result_buffer[i]) + i += 1 + if code == ibase.isc_info_svc_stdin: + (request_length, i) = self._extract_longint(self._result_buffer, i) + elif code == ibase.isc_info_svc_line: + (line, i) = self._extract_string(self._result_buffer, i) + else: + pass + if not wait: + stop = (request_length == 0) and (len(line) == 0) + elif request_length != 0: + wait = False # nbackup methods: def nbackup(self, source_database, dest_filename, @@ -1111,14 +1282,14 @@ """ self.__check_active() # Begin parameter validation section. - _checkString(source_database) - _checkString(dest_filename) + _check_string(source_database) + _check_string(dest_filename) dest_filename = ibase.b(dest_filename) # Begin option bitmask setup section. - optionMask = 0 + option_mask = 0 if no_db_triggers: - optionMask |= ibase.isc_spb_bkp_no_triggers + option_mask |= ibase.isc_spb_bkp_no_triggers # End option bitmask setup section. # Construct the request buffer. 
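The stdin loop above implements a pull protocol: each isc_service_query reply carries an isc_info_svc_stdin tag telling the client how many bytes the server is ready to accept next. A simplified schematic of that handshake, leaving out the ctypes buffer plumbing, the isc_info_svc_line replies, and the startup 'wait' flag tracked by the real loop:

    def feed_stdin(stream, service_query):
        # 'service_query' stands in for one isc_service_query round trip:
        # it sends a chunk of backup data and returns the number of bytes
        # the server requests next (0 means the server is done reading).
        requested = service_query(b'')            # first call sends no data
        while requested:
            chunk = stream.read(min(requested, 65500))
            requested = service_query(chunk)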
@@ -1134,7 +1305,7 @@ request.add_numeric(ibase.isc_spb_nbk_level, nbackup_level) # Options bitmask: - request.add_numeric(ibase.isc_spb_options, optionMask) + request.add_numeric(ibase.isc_spb_options, option_mask) # Done constructing the request buffer. self._act(request) @@ -1154,13 +1325,13 @@ self.__check_active() # Begin parameter validation section. source_filenames = self._require_str_or_tuple_of_str(source_filenames) - _checkString(dest_filename) + _check_string(dest_filename) dest_filename = ibase.b(dest_filename) # Begin option bitmask setup section. - optionMask = 0 + option_mask = 0 if no_db_triggers: - optionMask |= ibase.isc_spb_bkp_no_triggers + option_mask |= ibase.isc_spb_bkp_no_triggers # End option bitmask setup section. # Construct the request buffer. @@ -1173,7 +1344,7 @@ request.add_string_sequence(ibase.isc_spb_nbk_file, source_filenames) # Options bitmask: - request.add_numeric(ibase.isc_spb_options, optionMask) + request.add_numeric(ibase.isc_spb_options, option_mask) # Done constructing the request buffer. self._act(request) @@ -1185,7 +1356,7 @@ :param string config: Trace session configuration. :param string name: (optional) Trace session name. :returns integer: Trace session ID. - :raises DatabaseError: When session ID is not returned on start. + :raises fdb.DatabaseError: When session ID is not returned on start. Trace session output could be retrieved through :meth:`readline`, :meth:`readlines`, iteration over `Connection` or ignored via call to @@ -1198,17 +1369,17 @@ including call to any `trace_` method will fail with exception. """ self.__check_active() - if not name == None: - _checkString(name) - _checkString(config) + if not name is None: + _check_string(name) + _check_string(config) # Construct the request buffer. - reqBuf = _ServiceActionRequestBuilder(ibase.isc_action_svc_trace_start) + req_buf = _ServiceActionRequestBuilder(ibase.isc_action_svc_trace_start) # trace name: - if not name == None: - reqBuf.add_string(ibase.isc_spb_trc_name, name) + if not name is None: + req_buf.add_string(ibase.isc_spb_trc_name, name) # trace configuration: - reqBuf.add_string(ibase.isc_spb_trc_cfg, config) - self._act(reqBuf) + req_buf.add_string(ibase.isc_spb_trc_cfg, config) + self._act(req_buf) self.__fetching = True self.__eof = False response = self._Q(ibase.isc_info_svc_line, self.QUERY_TYPE_PLAIN_STRING) @@ -1222,15 +1393,15 @@ :param integer trace_id: Trace session ID. :returns string: Text with confirmation that session was stopped. - :raises DatabaseError: When trace session is not stopped. - :raises OperationalError: When server can't perform requested operation. + :raises `~fdb.DatabaseError`: When trace session is not stopped. + :raises `~fdb.OperationalError`: When server can't perform requested operation. """ self.__check_active() # Construct the request buffer. - reqBuf = _ServiceActionRequestBuilder(ibase.isc_action_svc_trace_stop) - reqBuf.add_numeric(ibase.isc_spb_trc_id, trace_id) + req_buf = _ServiceActionRequestBuilder(ibase.isc_action_svc_trace_stop) + req_buf.add_numeric(ibase.isc_spb_trc_id, trace_id) - response = self._act_and_return_textual_results(reqBuf) + response = self._act_and_return_textual_results(req_buf) if not response.startswith("Trace session ID %i stopped" % trace_id): # response should contain the error message raise fdb.DatabaseError(response) @@ -1240,16 +1411,15 @@ :param integer trace_id: Trace session ID. :returns string: Text with confirmation that session was paused. 
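Because trace_start() leaves its connection in fetching mode, a session is usually started on one connection and controlled from another. A schematic sketch; the trace configuration text is server-version-specific and is only a placeholder here:

    import fdb.services

    CONFIG = '...'   # trace configuration text; syntax depends on server version

    # Two connections: trace output ties up the one that started the session.
    out_svc = fdb.services.connect(host='localhost', user='sysdba', password='masterkey')
    ctl_svc = fdb.services.connect(host='localhost', user='sysdba', password='masterkey')
    trace_id = out_svc.trace_start(CONFIG, name='audit')
    print(ctl_svc.trace_list())       # the new session shows up in the listing
    ctl_svc.trace_stop(trace_id)      # ends the output stream on out_svc
    for line in out_svc:              # drain the remaining session output
        print(line)
    out_svc.close()
    ctl_svc.close()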
- :raises DatabaseError: When trace session is not paused. - :raises OperationalError: When server can't perform requested operation. + :raises `~fdb.DatabaseError`: When trace session is not paused. + :raises `~fdb.OperationalError`: When server can't perform requested operation. """ self.__check_active() # Construct the request buffer. - reqBuf = _ServiceActionRequestBuilder( - ibase.isc_action_svc_trace_suspend) - reqBuf.add_numeric(ibase.isc_spb_trc_id, trace_id) + req_buf = _ServiceActionRequestBuilder(ibase.isc_action_svc_trace_suspend) + req_buf.add_numeric(ibase.isc_spb_trc_id, trace_id) - response = self._act_and_return_textual_results(reqBuf) + response = self._act_and_return_textual_results(req_buf) if not response.startswith("Trace session ID %i paused" % trace_id): # response should contain the error message raise fdb.DatabaseError(response) @@ -1259,16 +1429,15 @@ :param integer trace_id: Trace session ID. :returns string: Text with confirmation that session was resumed. - :raises DatabaseError: When trace session is not resumed. - :raises OperationalError: When server can't perform requested operation. + :raises `~fdb.DatabaseError`: When trace session is not resumed. + :raises `~fdb.OperationalError`: When server can't perform requested operation. """ self.__check_active() # Construct the request buffer. - reqBuf = _ServiceActionRequestBuilder( - ibase.isc_action_svc_trace_resume) - reqBuf.add_numeric(ibase.isc_spb_trc_id, trace_id) + req_buf = _ServiceActionRequestBuilder(ibase.isc_action_svc_trace_resume) + req_buf.add_numeric(ibase.isc_spb_trc_id, trace_id) - response = self._act_and_return_textual_results(reqBuf) + response = self._act_and_return_textual_results(req_buf) if not response.startswith("Trace session ID %i resumed" % trace_id): # response should contain the error message raise fdb.DatabaseError(response) @@ -1285,13 +1454,13 @@ :user: (string) Trace user name. :flags: (list of strings) Session flags. - :raises OperationalError: When server can't perform requested operation. + :raises `~fdb.OperationalError`: When server can't perform requested operation. """ self.__check_active() # Construct the request buffer. - reqBuf = _ServiceActionRequestBuilder(ibase.isc_action_svc_trace_list) + req_buf = _ServiceActionRequestBuilder(ibase.isc_action_svc_trace_list) # Get and parse the returned list. - session_list = self._act_and_return_textual_results(reqBuf) + session_list = self._act_and_return_textual_results(req_buf) result = {} session_id = None for line in session_list.split('\n'): @@ -1306,7 +1475,7 @@ result[session_id]["user"] = line.split(':')[1].strip() elif line.lstrip().startswith("date:"): result[session_id]["date"] = datetime.datetime.strptime( - line.split(':',1)[1].strip(), + line.split(':', 1)[1].strip(), '%Y-%m-%d %H:%M:%S') elif line.lstrip().startswith("flags:"): result[session_id]["flags"] = line.split(':')[1].strip().split(',') @@ -1321,7 +1490,7 @@ :param integer n: Number of pages. """ self.__check_active() - _checkString(database) + _check_string(database) self._property_action_with_one_num_code(database, ibase.isc_spb_prp_page_buffers, n) @@ -1332,7 +1501,7 @@ :param integer n: Sweep treshold, or `0` to disable automatic sweep. """ self.__check_active() - _checkString(database) + _check_string(database) self._property_action_with_one_num_code(database, ibase.isc_spb_prp_sweep_interval, n) @@ -1343,14 +1512,14 @@ :param boolean reserve_space: `True` to reserve space, `False` to do not. 
""" self.__check_active() - _checkString(database) + _check_string(database) if reserve_space: - reserveCode = ibase.isc_spb_prp_res + reserve_code = ibase.isc_spb_prp_res else: - reserveCode = ibase.isc_spb_prp_res_use_full + reserve_code = ibase.isc_spb_prp_res_use_full self._property_action_with_one_num_code(database, ibase.isc_spb_prp_reserve_space, - reserveCode, num_ctype='b') + reserve_code, num_ctype='b') def set_write_mode(self, database, mode): """Set Disk Write Mode: Sync (forced writes) or Async (buffered). @@ -1360,10 +1529,10 @@ :data:`~fdb.services.WRITE_BUFFERED` """ self.__check_active() - _checkString(database) + _check_string(database) if mode not in (WRITE_FORCED, WRITE_BUFFERED): raise ValueError('mode must be one of the following constants:' - ' fdb.services.WRITE_FORCED, fdb.services.WRITE_BUFFERED.') + ' fdb.services.WRITE_FORCED, fdb.services.WRITE_BUFFERED.') self._property_action_with_one_num_code(database, ibase.isc_spb_prp_write_mode, mode, num_ctype='b') @@ -1376,10 +1545,10 @@ :data:`~fdb.services.ACCESS_READ_ONLY` """ self.__check_active() - _checkString(database) + _check_string(database) if mode not in (ACCESS_READ_WRITE, ACCESS_READ_ONLY): raise ValueError('mode must be one of the following constants:' - ' fdb.services.ACCESS_READ_WRITE, fdb.services.ACCESS_READ_ONLY.') + ' fdb.services.ACCESS_READ_WRITE, fdb.services.ACCESS_READ_ONLY.') self._property_action_with_one_num_code(database, ibase.isc_spb_prp_access_mode, mode, num_ctype='b') @@ -1390,7 +1559,7 @@ :param integer dialect: `1` or `3`. """ self.__check_active() - _checkString(database) + _check_string(database) # The IB 6 API Guide says that dialect "must be 1 or 3", but other # dialects may become valid in future versions, so don't require # dialect in (1, 3) @@ -1403,20 +1572,20 @@ :param string database: Database filename or alias. """ self.__check_active() - _checkString(database) - reqBuf = _ServiceActionRequestBuilder() - reqBuf.add_option_mask(ibase.isc_spb_prp_activate) - self._property_action(database, reqBuf) + _check_string(database) + req_buf = _ServiceActionRequestBuilder() + req_buf.add_option_mask(ibase.isc_spb_prp_activate) + self._property_action(database, req_buf) def no_linger(self, database): """Set one-off override for database linger. :param string database: Database filename or alias. """ self.__check_active() - _checkString(database) - reqBuf = _ServiceActionRequestBuilder() - reqBuf.add_option_mask(ibase.isc_spb_prp_nolinger) - self._property_action(database, reqBuf) + _check_string(database) + req_buf = _ServiceActionRequestBuilder() + req_buf.add_option_mask(ibase.isc_spb_prp_nolinger) + self._property_action(database, req_buf) # Database repair/maintenance methods: def shutdown(self, database, shutdown_mode, shutdown_method, timeout): """Database shutdown. @@ -1434,24 +1603,24 @@ .. seealso:: See also :meth:`~Connection.bring_online` method. 
""" self.__check_active() - _checkString(database) + _check_string(database) if shutdown_mode not in (SHUT_LEGACY, SHUT_SINGLE, SHUT_MULTI, SHUT_FULL): raise ValueError('shutdown_mode must be one of the following' - ' constants: fdb.services.SHUT_LEGACY, fdb.services.SHUT_SINGLE,' - ' fdbfdb.services.SHUT_MULTI,' - ' fdb.services.SHUT_FULL.') + ' constants: fdb.services.SHUT_LEGACY, fdb.services.SHUT_SINGLE,' + ' fdbfdb.services.SHUT_MULTI,' + ' fdb.services.SHUT_FULL.') if shutdown_method not in (SHUT_FORCE, SHUT_DENY_NEW_TRANSACTIONS, SHUT_DENY_NEW_ATTACHMENTS): raise ValueError('shutdown_method must be one of the following' - ' constants: fdb.services.SHUT_FORCE,' - ' fdb.services.SHUT_DENY_NEW_TRANSACTIONS,' - ' fdb.services.SHUT_DENY_NEW_ATTACHMENTS.') - reqBuf = _ServiceActionRequestBuilder() + ' constants: fdb.services.SHUT_FORCE,' + ' fdb.services.SHUT_DENY_NEW_TRANSACTIONS,' + ' fdb.services.SHUT_DENY_NEW_ATTACHMENTS.') + req_buf = _ServiceActionRequestBuilder() if shutdown_mode != SHUT_LEGACY: - reqBuf.add_numeric(ibase.isc_spb_prp_shutdown_mode, - shutdown_mode, numCType='B') - reqBuf.add_numeric(shutdown_method, timeout, numCType='I') - self._property_action(database, reqBuf) + req_buf.add_numeric(ibase.isc_spb_prp_shutdown_mode, + shutdown_mode, numctype='B') + req_buf.add_numeric(shutdown_method, timeout, numctype='I') + self._property_action(database, req_buf) def bring_online(self, database, online_mode=SHUT_NORMAL): """Bring previously shut down database back online. @@ -1463,19 +1632,19 @@ .. seealso:: See also :meth:`~Connection.shutdown` method. """ self.__check_active() - _checkString(database) - if online_mode not in (SHUT_LEGACY, SHUT_NORMAL,SHUT_SINGLE, SHUT_MULTI): + _check_string(database) + if online_mode not in (SHUT_LEGACY, SHUT_NORMAL, SHUT_SINGLE, SHUT_MULTI): raise ValueError('online_mode must be one of the following' - ' constants: fdb.services.SHUT_LEGACY, fdb.services.SHUT_NORMAL,' - ' fdbfdb.services.SHUT_SINGLE,' - ' fdb.services.SHUT_MULTI.') - reqBuf = _ServiceActionRequestBuilder() + ' constants: fdb.services.SHUT_LEGACY, fdb.services.SHUT_NORMAL,' + ' fdbfdb.services.SHUT_SINGLE,' + ' fdb.services.SHUT_MULTI.') + req_buf = _ServiceActionRequestBuilder() if online_mode == SHUT_LEGACY: - reqBuf.add_option_mask(ibase.isc_spb_prp_db_online) + req_buf.add_option_mask(ibase.isc_spb_prp_db_online) else: - reqBuf.add_numeric(ibase.isc_spb_prp_online_mode, - online_mode, numCType='B') - self._property_action(database, reqBuf) + req_buf.add_numeric(ibase.isc_spb_prp_online_mode, + online_mode, numctype='B') + self._property_action(database, req_buf) def sweep(self, database): """Perform Database Sweep. @@ -1484,11 +1653,11 @@ :param string database: Database filename or alias. """ self.__check_active() - _checkString(database) - reqBuf = _ServiceActionRequestBuilder() - optionMask = ibase.isc_spb_rpr_sweep_db - reqBuf.add_option_mask(optionMask) - return self._repair_action(database, reqBuf) + _check_string(database) + req_buf = _ServiceActionRequestBuilder() + option_mask = ibase.isc_spb_rpr_sweep_db + req_buf.add_option_mask(option_mask) + return self._repair_action(database, req_buf) def repair(self, database, read_only_validation=0, ignore_checksums=0, @@ -1510,7 +1679,7 @@ .. note:: Method call will not return until action is finished. 
""" self.__check_active() - _checkString(database) + _check_string(database) # YYY: With certain option combinations, this method raises errors # that may not be very comprehensible to a Python programmer who's not # well versed with IB/FB. Should option combination filtering be @@ -1520,23 +1689,23 @@ # become outdated, or to inadvertently enforce an unnecessary, # crippling constraint on a certain option combination that the # database engine would have allowed. - reqBuf = _ServiceActionRequestBuilder() - optionMask = 0 + req_buf = _ServiceActionRequestBuilder() + option_mask = 0 if read_only_validation: - optionMask |= ibase.isc_spb_rpr_check_db + option_mask |= ibase.isc_spb_rpr_check_db if ignore_checksums: - optionMask |= ibase.isc_spb_rpr_ignore_checksum + option_mask |= ibase.isc_spb_rpr_ignore_checksum if kill_unavailable_shadows: - optionMask |= ibase.isc_spb_rpr_kill_shadows + option_mask |= ibase.isc_spb_rpr_kill_shadows if mend_database: - optionMask |= ibase.isc_spb_rpr_mend_db + option_mask |= ibase.isc_spb_rpr_mend_db if validate_database: - optionMask |= ibase.isc_spb_rpr_validate_db + option_mask |= ibase.isc_spb_rpr_validate_db if validate_record_fragments: - optionMask |= ibase.isc_spb_rpr_full - reqBuf.add_option_mask(optionMask) - return self._repair_action(database, reqBuf) + option_mask |= ibase.isc_spb_rpr_full + req_buf.add_option_mask(option_mask) + return self._repair_action(database, req_buf) # 2003.07.12: Removed method resolveLimboTransactions (dropped plans to # support that operation from kinterbasdb since transactions IDs are not @@ -1546,7 +1715,7 @@ def validate(self, database, include_tables=None, exclude_tables=None, include_indices=None, exclude_indices=None, - lock_timeout=None,callback=None): + lock_timeout=None, callback=None): """On-line database validation. :param string database: Database filename or alias. @@ -1577,7 +1746,7 @@ """ self.__check_active() - _checkString(database) + _check_string(database) request = _ServiceActionRequestBuilder(ibase.isc_action_svc_validate) request.add_database_name(database) if include_tables is not None: @@ -1589,7 +1758,7 @@ if exclude_indices is not None: request.add_string(ibase.isc_spb_val_idx_excl, exclude_indices) if lock_timeout is not None: - request.add_numeric(ibase.isc_spb_val_lock_timeout, lock_timeout, numCType='i') + request.add_numeric(ibase.isc_spb_val_lock_timeout, lock_timeout, numctype='i') # Done constructing the request buffer. self._act(request) @@ -1610,51 +1779,49 @@ self.__check_active() if user_name is not None: if isinstance(user_name, ibase.myunicode): - _checkString(user_name) + _check_string(user_name) user_name = ibase.b(user_name) - reqBuf = _ServiceActionRequestBuilder( - ibase.isc_action_svc_display_user - ) + req_buf = _ServiceActionRequestBuilder(ibase.isc_action_svc_display_user) if user_name: user_name = user_name.upper() - reqBuf.add_string(ibase.isc_spb_sec_username, user_name) - self._act(reqBuf) + req_buf.add_string(ibase.isc_spb_sec_username, user_name) + self._act(req_buf) raw = self._QR(ibase.isc_info_svc_get_users) users = [] - curUser = None + cur_user = None pos = 1 # Ignore raw[0]. 
upper_limit = len(raw) - 1 while pos < upper_limit: cluster = ibase.ord2(raw[pos]) pos += 1 if cluster == ibase.isc_spb_sec_username: - if curUser is not None: - users.append(curUser) - curUser = None + if cur_user is not None: + users.append(cur_user) + cur_user = None (user_name, pos) = self._extract_string(raw, pos) - curUser = User(user_name) + cur_user = User(user_name) elif cluster == ibase.isc_spb_sec_password: (password, pos) = self._extract_string(raw, pos) - curUser.password = password + cur_user.password = password elif cluster == ibase.isc_spb_sec_firstname: - (firstName, pos) = self._extract_string(raw, pos) - curUser.first_name = firstName + (first_name, pos) = self._extract_string(raw, pos) + cur_user.first_name = first_name elif cluster == ibase.isc_spb_sec_middlename: - (middleName, pos) = self._extract_string(raw, pos) - curUser.middle_name = middleName + (middle_name, pos) = self._extract_string(raw, pos) + cur_user.middle_name = middle_name elif cluster == ibase.isc_spb_sec_lastname: - (lastName, pos) = self._extract_string(raw, pos) - curUser.last_name = lastName + (last_name, pos) = self._extract_string(raw, pos) + cur_user.last_name = last_name elif cluster == ibase.isc_spb_sec_groupid: - (groupId, pos) = self._extract_int(raw, pos) - curUser.group_id = groupId + (group_id, pos) = self._extract_int(raw, pos) + cur_user.group_id = group_id elif cluster == ibase.isc_spb_sec_userid: - (userId, pos) = self._extract_int(raw, pos) - curUser.user_id = userId + (user_id, pos) = self._extract_int(raw, pos) + cur_user.user_id = user_id # Handle the last user: - if curUser is not None: - users.append(curUser) - curUser = None + if cur_user is not None: + users.append(cur_user) + cur_user = None return users def add_user(self, user): """Add new user. @@ -1673,31 +1840,31 @@ if not user.name: raise fdb.ProgrammingError('You must specify a username.') else: - _checkString(user.name) + _check_string(user.name) user.name = ibase.b(user.name) if not user.password: raise fdb.ProgrammingError('You must specify a password.') else: - _checkString(user.password) + _check_string(user.password) user.password = ibase.b(user.password) - reqBuf = _ServiceActionRequestBuilder(ibase.isc_action_svc_add_user) + req_buf = _ServiceActionRequestBuilder(ibase.isc_action_svc_add_user) - reqBuf.add_string(ibase.isc_spb_sec_username, user.name) - reqBuf.add_string(ibase.isc_spb_sec_password, user.password) + req_buf.add_string(ibase.isc_spb_sec_username, user.name) + req_buf.add_string(ibase.isc_spb_sec_password, user.password) if user.first_name: user.first_name = ibase.b(user.first_name) - reqBuf.add_string(ibase.isc_spb_sec_firstname, user.first_name) + req_buf.add_string(ibase.isc_spb_sec_firstname, user.first_name) if user.middle_name: user.middle_name = ibase.b(user.middle_name) - reqBuf.add_string(ibase.isc_spb_sec_middlename, user.middle_name) + req_buf.add_string(ibase.isc_spb_sec_middlename, user.middle_name) if user.last_name: user.last_name = ibase.b(user.last_name) - reqBuf.add_string(ibase.isc_spb_sec_lastname, user.last_name) + req_buf.add_string(ibase.isc_spb_sec_lastname, user.last_name) - self._act_and_return_textual_results(reqBuf) + self._act_and_return_textual_results(req_buf) def modify_user(self, user): """Modify user information. @@ -1714,26 +1881,26 @@ when it has value. 
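Taken together, the user-management calls support a create/modify/remove round trip. Sketch with hypothetical credentials; a fresh User object is used for the modification because add_user() converts the attributes of the one it was given to bytes in place:

    import fdb.services

    svc = fdb.services.connect(host='localhost', user='sysdba', password='masterkey')
    try:
        user = fdb.services.User('alice')     # name is upper-cased to ALICE
        user.password = 'secret'              # add_user() requires a password
        user.first_name = 'Alice'
        svc.add_user(user)

        changed = fdb.services.User('alice')
        changed.password = 'newsecret'
        svc.modify_user(changed)

        svc.remove_user('alice')              # accepts a User or a plain name
    finally:
        svc.close()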
""" self.__check_active() - reqBuf = _ServiceActionRequestBuilder(ibase.isc_action_svc_modify_user) + req_buf = _ServiceActionRequestBuilder(ibase.isc_action_svc_modify_user) if isinstance(user.name, str): user.name = ibase.b(user.name) - reqBuf.add_string(ibase.isc_spb_sec_username, user.name) + req_buf.add_string(ibase.isc_spb_sec_username, user.name) if isinstance(user.password, str): user.password = ibase.b(user.password) - reqBuf.add_string(ibase.isc_spb_sec_password, user.password) + req_buf.add_string(ibase.isc_spb_sec_password, user.password) # Change the optional attributes whether they're empty or not. if isinstance(user.first_name, str): user.first_name = ibase.b(user.first_name) - reqBuf.add_string(ibase.isc_spb_sec_firstname, user.first_name) + req_buf.add_string(ibase.isc_spb_sec_firstname, user.first_name) if isinstance(user.middle_name, str): user.middle_name = ibase.b(user.middle_name) - reqBuf.add_string(ibase.isc_spb_sec_middlename, user.middle_name) + req_buf.add_string(ibase.isc_spb_sec_middlename, user.middle_name) if isinstance(user.last_name, str): user.last_name = ibase.b(user.last_name) - reqBuf.add_string(ibase.isc_spb_sec_lastname, user.last_name) + req_buf.add_string(ibase.isc_spb_sec_lastname, user.last_name) - self._act_and_return_textual_results(reqBuf) + self._act_and_return_textual_results(req_buf) def remove_user(self, user): """Remove user. @@ -1745,13 +1912,13 @@ if isinstance(user, User): username = user.name else: - _checkString(user) + _check_string(user) user = ibase.b(user) username = user - reqBuf = _ServiceActionRequestBuilder(ibase.isc_action_svc_delete_user) - reqBuf.add_string(ibase.isc_spb_sec_username, username) - self._act_and_return_textual_results(reqBuf) + req_buf = _ServiceActionRequestBuilder(ibase.isc_action_svc_delete_user) + req_buf.add_string(ibase.isc_spb_sec_username, username) + self._act_and_return_textual_results(req_buf) def user_exists(self, user): """Check for user's existence. @@ -1780,7 +1947,7 @@ class User(object): def __init__(self, name=None): if name: - _checkString(name) + _check_string(name) self.name = name.upper() else: #: User `login` name (username). @@ -1808,14 +1975,14 @@ return '' % ( (self.name is None and 'without a name') or 'named "%s"' % self.name) - def load_information(self,svc): + def load_information(self, svc): """Load information about user from server. :param svc: Open service connection. :type svc: :class:`Connection` :returns: True if information was successfuly retrieved, False otherwise. - :raises ProgrammingError: If user name is not defined. + :raises `~fdb.ProgrammingError`: If user name is not defined. """ if self.name is None: raise fdb.ProgrammingError("Can't load information about user without name.") @@ -1834,60 +2001,63 @@ # using high-level, easily comprehensible syntax. 
def __init__(self, clusterIdentifier=None): - self._buffer = [] - if clusterIdentifier: - self.add_code(clusterIdentifier) + self.ci = clusterIdentifier + self.clear() def __str__(self): return self.render() - def extend(self, otherRequestBuilder): - self._buffer.append(otherRequestBuilder.render()) + def clear(self): + self._buffer = [] + if self.ci: + self.add_code(self.ci) + + def extend(self, other_request_builder): + self._buffer.append(other_request_builder.render()) def add_code(self, code): _code2spb(self._buffer, code) def add_string(self, code, s): - _checkString(s) + _check_string(s) _string2spb(self._buffer, code, ibase.b(s)) - def add_string_sequence(self, code, stringSequence): - for s in stringSequence: + def add_string_sequence(self, code, string_sequence): + for s in string_sequence: self.add_string(code, s) - def add_string_numeric_pairs_sequence(self, stringCode, stringSequence, - numericCode, numericSequence): - stringCount = len(stringSequence) - numericCount = len(numericSequence) - if numericCount != stringCount - 1: + def add_string_numeric_pairs_sequence(self, string_code, string_sequence, + numeric_code, numeric_sequence): + string_count = len(string_sequence) + numeric_count = len(numeric_sequence) + if numeric_count != string_count - 1: raise ValueError("Numeric sequence must contain exactly one less" - " element than its companion string sequence." - ) + " element than its companion string sequence.") i = 0 - while i < stringCount: - self.add_string(stringCode, stringSequence[i]) - if i < numericCount: - self.add_numeric(numericCode, numericSequence[i]) + while i < string_count: + self.add_string(string_code, string_sequence[i]) + if i < numeric_count: + self.add_numeric(numeric_code, numeric_sequence[i]) i += 1 - def add_numeric(self, code, n, numCType='I'): - _numeric2spb(self._buffer, code, n, numCType=numCType) + def add_numeric(self, code, n, numctype='I'): + _numeric2spb(self._buffer, code, n, numctype=numctype) - def add_option_mask(self, optionMask): - self.add_numeric(ibase.isc_spb_options, optionMask) + def add_option_mask(self, option_mask): + self.add_numeric(ibase.isc_spb_options, option_mask) - def add_database_name(self, databaseName): + def add_database_name(self, database_name): # 2003.07.20: Issue a warning for a hostname-containing databaseName # because it will cause isc_service_start to raise an inscrutable error # message with Firebird 1.5 (though it would not have raised an error # at all with Firebird 1.0 and earlier). ### Todo: verify handling of P version differences, refactor - databaseName = ibase.b(databaseName,fdb.fbcore._FS_ENCODING) + database_name = ibase.b(database_name, fdb.fbcore._FS_ENCODING) if ibase.PYTHON_MAJOR_VER == 3: - colonIndex = (databaseName.decode(fdb.fbcore._FS_ENCODING)).find(':') + colon_index = (database_name.decode(fdb.fbcore._FS_ENCODING)).find(':') else: - colonIndex = databaseName.find(':') - if colonIndex != -1: + colon_index = database_name.find(':') + if colon_index != -1: # This code makes no provision for platforms other than Windows # that allow colons in paths (such as MacOS). Some of # kinterbasdb's current implementation (e.g., event handling) is @@ -1897,23 +2067,19 @@ # # Files that don't exist might still be valid if the connection # is to a server other than the local machine. 
- not os.path.exists(ibase.nativestr(databaseName,fdb.fbcore._FS_ENCODING)) + not os.path.exists(ibase.nativestr(database_name, fdb.fbcore._FS_ENCODING)) # "Guess" that if the colon falls within the first two # characters of the string, the pre-colon portion refers to a # Windows drive letter rather than to a remote host. # This isn't guaranteed to be correct. - and colonIndex > 1 - ): - warnings.warn( - ' Unlike conventional DSNs, Services API database names' - ' must not include the host name; remove the "%s" from' - ' your database name.' - ' (Firebird 1.0 will accept this, but Firebird 1.5 will' - ' raise an error.)' - % databaseName[:colonIndex + 1], - UserWarning - ) - self.add_string(ibase.isc_spb_dbname, databaseName) + and colon_index > 1): + warnings.warn(' Unlike conventional DSNs, Services API database names' + ' must not include the host name; remove the "%s" from' + ' your database name.' + ' (Firebird 1.0 will accept this, but Firebird 1.5 will' + ' raise an error.)' + % database_name[:colon_index + 1], UserWarning) + self.add_string(ibase.isc_spb_dbname, database_name) def render(self): - return ibase.b('').join(self._buffer) \ No newline at end of file + return ibase.b('').join(self._buffer) diff -Nru fdb-1.6.1+dfsg1/fdb/trace.py fdb-2.0.0/fdb/trace.py --- fdb-1.6.1+dfsg1/fdb/trace.py 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/fdb/trace.py 2018-04-26 14:39:03.000000000 +0000 @@ -0,0 +1,969 @@ +#coding:utf-8 +# +# PROGRAM/MODULE: fdb +# FILE: trace.py +# DESCRIPTION: Python driver for Firebird - Firebird Trace & Audit +# CREATED: 10.12.2017 +# +# Software distributed under the License is distributed AS IS, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. +# See the License for the specific language governing rights +# and limitations under the License. +# +# The Original Code was created by Pavel Cisar +# +# Copyright (c) Pavel Cisar +# and all contributors signed below. +# +# All Rights Reserved. +# Contributor(s): ______________________________________. +# +# See LICENSE.TXT for details. + +import fdb +import datetime +import decimal +import collections +from . import utils +try: + from sys import intern +except ImportError: + pass + +#: Trace event status codes +STATUS_OK = ' ' +STATUS_FAILED = 'F' +STATUS_UNAUTHORIZED = 'U' +STATUS_UNKNOWN = '?' 
+ +#: Trace event codes, also works as index to EVENTS list +EVENT_TRACE_INIT = 0 +EVENT_TRACE_SUSPEND = 1 +EVENT_TRACE_END = 2 +EVENT_CREATE_DATABASE = 3 +EVENT_DROP_DATABASE = 4 +EVENT_ATTACH = 5 +EVENT_DETACH = 6 +EVENT_TRANSACTION_START = 7 +EVENT_COMMIT = 8 +EVENT_ROLLBACK = 9 +EVENT_COMMIT_R = 10 +EVENT_ROLLBACK_R = 11 +EVENT_STMT_PREPARE = 12 +EVENT_STMT_START = 13 +EVENT_STMT_END = 14 +EVENT_STMT_FREE = 15 +EVENT_STMT_CLOSE = 16 +EVENT_TRG_START = 17 +EVENT_TRG_END = 18 +EVENT_PROC_START = 19 +EVENT_PROC_END = 20 +EVENT_SVC_START = 21 +EVENT_SVC_ATTACH = 22 +EVENT_SVC_DETACH = 23 +EVENT_SVC_QUERY = 24 +EVENT_SET_CONTEXT = 25 +EVENT_ERROR = 26 +EVENT_WARNING = 27 +EVENT_SWEEP_START = 28 +EVENT_SWEEP_PROGRESS = 29 +EVENT_SWEEP_FINISH = 30 +EVENT_SWEEP_FAILED = 31 +EVENT_BLR_COMPILE = 32 +EVENT_BLR_EXECUTE = 33 +EVENT_DYN_EXECUTE = 34 +EVENT_UNKNOWN = 35 + +#: List of trace event names in order matching their numeric codes +EVENTS = ['TRACE_INIT', 'TRACE_SUSPENDED', 'TRACE_FINI', + 'CREATE_DATABASE', 'DROP_DATABASE', 'ATTACH_DATABASE', 'DETACH_DATABASE', + 'START_TRANSACTION', 'COMMIT_TRANSACTION', 'ROLLBACK_TRANSACTION', 'COMMIT_RETAINING', 'ROLLBACK_RETAINING', + 'PREPARE_STATEMENT', 'EXECUTE_STATEMENT_START', 'EXECUTE_STATEMENT_FINISH', 'FREE_STATEMENT', 'CLOSE_CURSOR', + 'EXECUTE_TRIGGER_START', 'EXECUTE_TRIGGER_FINISH', + 'EXECUTE_PROCEDURE_START', 'EXECUTE_PROCEDURE_FINISH', + 'START_SERVICE', 'ATTACH_SERVICE', 'DETACH_SERVICE', 'QUERY_SERVICE', + 'SET_CONTEXT', 'ERROR', 'WARNING', + 'SWEEP_START', 'SWEEP_PROGRESS', 'SWEEP_FINISH', 'SWEEP_FAILED', + 'COMPILE_BLR', 'EXECUTE_BLR', 'EXECUTE_DYN', + 'UNKNOWN'] + +# +# Named tuples for individual trace events +AttachmentInfo = collections.namedtuple('AttachmentInfo', 'attachment_id,database,charset,protocol,address,user,role,remote_process,remote_pid') +TransactionInfo = collections.namedtuple('TransactionInfo', 'attachment_id,transaction_id,options') +ServiceInfo = collections.namedtuple('ServiceInfo', 'service_id,user,protocol,address,remote_process,remote_pid') +SQLInfo = collections.namedtuple('SQLInfo', 'sql_id,sql,plan') +ParamInfo = collections.namedtuple('ParamInfo', 'par_id,params') +# +AccessTuple = collections.namedtuple('AccessTuple', 'table,natural,index,update,insert,delete,backout,purge,expunge') +# +EventTraceInit = collections.namedtuple('EventTraceInit', 'event_id,timestamp,session_name') +EventTraceSuspend = collections.namedtuple('EventTraceSuspend', 'event_id,timestamp,session_name') +EventTraceFinish = collections.namedtuple('EventTraceFinish', 'event_id,timestamp,session_name') +# +EventCreate = collections.namedtuple('EventCreate', 'event_id,timestamp,status,attachment_id,database,charset,protocol,address,user,role,remote_process,remote_pid') +EventDrop = collections.namedtuple('EventDrop', 'event_id,timestamp,status,attachment_id,database,charset,protocol,address,user,role,remote_process,remote_pid') +EventAttach = collections.namedtuple('EventAttach', 'event_id,timestamp,status,attachment_id,database,charset,protocol,address,user,role,remote_process,remote_pid') +EventDetach = collections.namedtuple('EventDetach', 'event_id,timestamp,status,attachment_id,database,charset,protocol,address,user,role,remote_process,remote_pid') +# +EventTransactionStart = collections.namedtuple('EventTransactionStart', 'event_id,timestamp,status,attachment_id,transaction_id,options') +EventCommit = collections.namedtuple('EventCommit', 
'event_id,timestamp,status,attachment_id,transaction_id,options,run_time,reads,writes,fetches,marks') +EventRollback = collections.namedtuple('EventRollback', 'event_id,timestamp,status,attachment_id,transaction_id,options,run_time,reads,writes,fetches,marks') +EventCommitRetaining = collections.namedtuple('EventCommitRetaining', 'event_id,timestamp,status,attachment_id,transaction_id,options,run_time,reads,writes,fetches,marks') +EventRollbackRetaining = collections.namedtuple('EventRollbackRetaining', 'event_id,timestamp,status,attachment_id,transaction_id,options,run_time,reads,writes,fetches,marks') +# +EventPrepareStatement = collections.namedtuple('EventPrepareStatement', 'event_id,timestamp,status,attachment_id,transaction_id,statement_id,sql_id,prepare_time') +EventStatementStart = collections.namedtuple('EventStatementStart', 'event_id,timestamp,status,attachment_id,transaction_id,statement_id,sql_id,param_id') +EventStatementFinish = collections.namedtuple('EventStatementFinish', 'event_id,timestamp,status,attachment_id,transaction_id,statement_id,sql_id,param_id,records,run_time,reads,writes,fetches,marks,access') +EventFreeStatement = collections.namedtuple('EventFreeStatement', 'event_id,timestamp,attachment_id,transaction_id,statement_id,sql_id') +EventCloseCursor = collections.namedtuple('EventCloseCursor', 'event_id,timestamp,attachment_id,transaction_id,statement_id,sql_id') +# +EventTriggerStart = collections.namedtuple('EventTriggerStart', 'event_id,timestamp,status,attachment_id,transaction_id,trigger,table,event') +EventTriggerFinish = collections.namedtuple('EventTriggerFinish', 'event_id,timestamp,status,attachment_id,transaction_id,trigger,table,event,run_time,reads,writes,fetches,marks,access') +# +EventProcedureStart = collections.namedtuple('EventProcedureStart', 'event_id,timestamp,status,attachment_id,transaction_id,procedure,param_id') +EventProcedureFinish = collections.namedtuple('EventProcedureFinish', 'event_id,timestamp,status,attachment_id,transaction_id,procedure,param_id,run_time,reads,writes,fetches,marks,access') +# +EventServiceAttach = collections.namedtuple('EventServiceAttach', 'event_id,timestamp,status,service_id') +EventServiceDetach = collections.namedtuple('EventServiceDetach', 'event_id,timestamp,status,service_id') +EventServiceStart = collections.namedtuple('EventServiceStart', 'event_id,timestamp,status,service_id,action,parameters') +EventServiceQuery = collections.namedtuple('EventServiceQuery', 'event_id,timestamp,status,service_id,action,parameters') +# +EventSetContext = collections.namedtuple('EventSetContext', 'event_id,timestamp,attachment_id,transaction_id,context,key,value') +# +EventError = collections.namedtuple('EventError', 'event_id,timestamp,attachment_id,place,details') +EventServiceError = collections.namedtuple('EventServiceError', 'event_id,timestamp,service_id,place,details') +EventWarning = collections.namedtuple('EventWarning', 'event_id,timestamp,attachment_id,place,details') +EventServiceWarning = collections.namedtuple('EventServiceWarning', 'event_id,timestamp,service_id,place,details') +# +EventSweepStart = collections.namedtuple('EventSweepStart', 'event_id,timestamp,attachment_id,oit,oat,ost,next') +EventSweepProgress = collections.namedtuple('EventSweepProgress', 'event_id,timestamp,attachment_id,run_time,reads,writes,fetches,marks,access') +EventSweepFinish = collections.namedtuple('EventSweepFinish', 'event_id,timestamp,attachment_id,oit,oat,ost,next,run_time,reads,writes,fetches,marks') + 
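Each parsed log entry surfaces as one of these named tuples, so consumers can dispatch on type with isinstance(). A sketch of filtering slow statements, assuming some iterable of already-parsed events (the parsing entry point itself is defined further down in this module):

    from fdb import trace

    def slow_statements(events, threshold_ms=1000):
        # 'events' is any iterable of the named tuples defined above;
        # run_time is reported in milliseconds (or None when absent).
        for e in events:
            if (isinstance(e, trace.EventStatementFinish)
                    and e.run_time and e.run_time > threshold_ms):
                print(e.event_id, e.timestamp, 'run_time:', e.run_time)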
+EventSweepFailed = collections.namedtuple('EventSweepFailed', 'event_id,timestamp,attachment_id') +# +EventBLRCompile = collections.namedtuple('EventBLRCompile', 'event_id,timestamp,status,attachment_id,statement_id,content,prepare_time') +EventBLRExecute = collections.namedtuple('EventBLRExecute', 'event_id,timestamp,status,attachment_id,transaction_id,statement_id,content,run_time,reads,writes,fetches,marks,access') +EventDYNExecute = collections.namedtuple('EventDYNExecute', 'event_id,timestamp,status,attachment_id,transaction_id,content,run_time') +# +EventUnknown = collections.namedtuple('EventUnknown', 'event_id,timestamp,data') + +class TraceParser(object): + """Parser for standard textual trace log. Produces named tuples describing individual trace log entries/events. + + Attributes: + + :seen_attachments: Set of attachment ids that were already processed. + :seen_transactions: Set of transaction ids that were already processed. + :seen_services: Set of service ids that were already processed. + :sqlinfo_map: Dictionary that maps (sql_cmd,plan) keys to internal ids + :param_map: Dictionary that maps parameters (statement or procedure) keys to internal ids + :next_event_id: Sequence id that would be assigned to next parsed event (starts with 1). + :next_sql_id: Sequence id that would be assigned to next parsed unique SQL command (starts with 1). + :next_param_id: Sequence id that would be assigned to next parsed unique parameter (starts with 1). +""" + def __init__(self): + self.seen_attachments = set() + self.seen_transactions = set() + self.seen_services = set() + self.sqlinfo_map = {} + self.param_map = {} + self.next_event_id = 1 + self.next_sql_id = 1 + self.next_param_id = 1 + # + self.__buffer = [] + self.__current_event = None + self.__current_block = None + self.__last_timestamp = None + self.__event_values = {} + self.__parse_map = {EVENT_TRACE_INIT: self.__parser_trace_init, + EVENT_TRACE_END: self.__parser_trace_finish, + EVENT_TRANSACTION_START: self.__parser_start_transaction, + EVENT_COMMIT: self.__parser_commit_transaction, + EVENT_ROLLBACK: self.__parser_rollback_transaction, + EVENT_COMMIT_R: self.__parser_commit_retaining, + EVENT_ROLLBACK_R: self.__parser_rollback_retaining, + EVENT_STMT_PREPARE: self.__parser_prepare_statement, + EVENT_STMT_START: self.__parser_execute_statement_start, + EVENT_STMT_END: self.__parser_execute_statement_finish, + EVENT_STMT_FREE: self.__parser_free_statement, + EVENT_STMT_CLOSE: self.__parser_close_cursor, + EVENT_TRG_START: self.__parser_trigger_start, + EVENT_TRG_END: self.__parser_trigger_finish, + EVENT_PROC_START: self.__parser_procedure_start, + EVENT_PROC_END: self.__parser_procedure_finish, + EVENT_CREATE_DATABASE: self.__parser_create_db, + EVENT_DROP_DATABASE: self.__parser_drop_db, + EVENT_ATTACH: self.__parser_attach, + EVENT_DETACH: self.__parser_detach, + EVENT_SVC_START: self.__parser_service_start, + EVENT_SVC_ATTACH: self.__parser_service_attach, + EVENT_SVC_DETACH: self.__parser_service_detach, + EVENT_SVC_QUERY: self.__parser_service_query, + EVENT_SET_CONTEXT: self.__parser_set_context, + EVENT_ERROR: self.__parser_error, + EVENT_WARNING: self.__parser_warning, + EVENT_SWEEP_START: self.__parser_sweep_start, + EVENT_SWEEP_PROGRESS: self.__parser_sweep_progress, + EVENT_SWEEP_FINISH: self.__parser_sweep_finish, + EVENT_SWEEP_FAILED: self.__parser_sweep_failed, + EVENT_BLR_COMPILE: self.__parser_blr_compile, + EVENT_BLR_EXECUTE: self.__parser_blr_execute, + EVENT_DYN_EXECUTE: self.__parser_dyn_execute, + 
+                            EVENT_UNKNOWN: self.__parser_unknown}
+    def _is_entry_header(self, line):
+        """Returns True if the line is a trace log entry header. This version only checks that the first item is a timestamp in a valid format.
+
+        :param string line: Line of text to be checked.
+"""
+        items = line.split()
+        try:
+            datetime.datetime.strptime(items[0], '%Y-%m-%dT%H:%M:%S.%f')
+            return True
+        except (IndexError, ValueError):
+            return False
+    def _is_session_suspended(self, line):
+        """Returns True if the line is the trace log message saying that the trace session was suspended because its log is full.
+
+        :param string line: Line of text to be checked.
+"""
+        return line.rfind('is suspended as its log is full ---') >= 0
+    def _is_plan_separator(self, line):
+        """Returns True if the line is a statement plan separator.
+
+        :param string line: Line of text to be checked.
+"""
+        return line == '^'*79
+    def _is_perf_start(self, line):
+        """Returns True if the line is the first item of statement performance information.
+
+        :param string line: Line of text to be checked.
+"""
+        return line.endswith(' records fetched')
+    def _is_blr_perf_start(self, line):
+        """Returns True if the line is the first item of BLR/DYN performance information.
+
+        :param string line: Line of text to be checked.
+"""
+        parts = line.split()
+        return 'ms' in parts or 'fetch(es)' in parts or 'mark(s)' in parts or 'read(s)' in parts or 'write(s)' in parts
+    def _is_param_start(self, line):
+        """Returns True if the line is the first item in a list of parameters.
+
+        :param string line: Line of text to be checked.
+"""
+        return line.startswith('param0 = ')
+    def _iter_trace_blocks(self, ilines):
+        lines = []
+        for line in ilines:
+            line = line.strip()
+            if line:
+                if not lines:
+                    if self._is_entry_header(line):
+                        lines.append(line)
+                else:
+                    if self._is_entry_header(line) or self._is_session_suspended(line):
+                        yield lines
+                        lines = [line]
+                    else:
+                        lines.append(line)
+        if lines:
+            yield lines
+    def _parse_header(self, line):
+        """Parses a trace entry header into a 3-item tuple.
+
+        :param string line: Line of text to be parsed.
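+
+        Example input (an illustrative sketch reconstructed from the parsing
+        logic below; the attachment marker and event name are assumptions,
+        not copied from a real trace log)::
+
+            2018-03-29T14:20:55.1180 (1232:0000000022415DB8) EXECUTE_STATEMENT_FINISH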
+ + :returns: Tuple with items: (timestamp, status, trace_entry_type_id) + + :raises `~fdb.ParseError`: When event is not recognized +""" + items = line.split() + timestamp = datetime.datetime.strptime(items[0], '%Y-%m-%dT%H:%M:%S.%f') + if (len(items) == 3) or (items[2] in ['ERROR', 'WARNING']): + return (timestamp, STATUS_OK, EVENTS.index(items[2]) if items[2] in EVENTS else EVENT_UNKNOWN) + else: + if items[2] == 'UNAUTHORIZED': + return (timestamp, STATUS_UNAUTHORIZED, EVENTS.index(items[3])) + elif items[2] == 'FAILED': + return (timestamp, STATUS_FAILED, EVENTS.index(items[3])) + elif items[2] == 'Unknown': + return (timestamp, STATUS_UNKNOWN, EVENT_UNKNOWN) # ' '.join(items[3:])) + else: + raise fdb.ParseError('Unrecognized event header: "%s"' % line) + def _parse_attachment_info(self, values, check=True): + line = self.__current_block.popleft() + database, sep, attachment = line.partition(' (') + values['database'] = database + attachment_id, user_role, charset, protocol_address = attachment.strip('()').split(',') + pad, s = attachment_id.split('_') + values['attachment_id'] = int(s) + values['charset'] = charset.strip() + # + protocol_address = protocol_address.strip() + if protocol_address == '': + protocol = address = protocol_address + else: + protocol, address = protocol_address.split(':') + values['protocol'] = protocol + values['address'] = address + if ':' in user_role: + a, b = user_role.strip().split(':') + else: + a = user_role.strip() + b = 'NONE' + values['user'] = a + values['role'] = b + if protocol_address == '': + values['remote_process'] = None + values['remote_pid'] = None + elif len(self.__current_block) > 0 and not (self.__current_block[0].startswith('(TRA') or + ' ms,' in self.__current_block[0] or + 'Transaction counters:' in self.__current_block[0]): + remote_process_id = self.__current_block.popleft() + remote_process, remote_pid = remote_process_id.rsplit(':', 1) + values['remote_process'] = remote_process + values['remote_pid'] = int(remote_pid) + else: + values['remote_process'] = None + values['remote_pid'] = None + # + if check and values['attachment_id'] not in self.seen_attachments: + self.__buffer.append(AttachmentInfo(**values)) + self.seen_attachments.add(values['attachment_id']) + def _parse_transaction_info(self, values, check=True): + # Transaction parameters + transaction_id, transaction_options = self.__current_block.popleft().strip('\t ()').split(',') + pad, s = transaction_id.split('_') + values['attachment_id'] = values['attachment_id'] + values['transaction_id'] = int(s) + values['options'] = [intern(x.strip()) for x in transaction_options.split('|')] + if check and values['transaction_id'] not in self.seen_transactions: + self.__buffer.append(TransactionInfo(**values)) + self.seen_transactions.add(values['transaction_id']) + def _parse_transaction_performance(self): + self.__event_values['run_time'] = None + self.__event_values['reads'] = None + self.__event_values['writes'] = None + self.__event_values['fetches'] = None + self.__event_values['marks'] = None + if self.__current_block: + values = self.__current_block.popleft().split(',') + while values: + value, val_type = values.pop().split() + if 'ms' in val_type: + self.__event_values['run_time'] = int(value) + elif 'read' in val_type: + self.__event_values['reads'] = int(value) + elif 'write' in val_type: + self.__event_values['writes'] = int(value) + elif 'fetch' in val_type: + self.__event_values['fetches'] = int(value) + elif 'mark' in val_type: + self.__event_values['marks'] = 
int(value) + else: + raise fdb.ParseError("Unhandled performance parameter %s" % val_type) + def _parse_attachment_and_transaction(self): + # Attachment + att_values = {} + self._parse_attachment_info(att_values) + # Transaction + tr_values = {} + tr_values['attachment_id'] = att_values['attachment_id'] + self._parse_transaction_info(tr_values) + self.__event_values['attachment_id'] = tr_values['attachment_id'] + self.__event_values['transaction_id'] = tr_values['transaction_id'] + def _parse_statement_id(self): + self.__event_values['plan'] = None + self.__event_values['sql'] = None + pad, s = self.__current_block.popleft().split() + self.__event_values['statement_id'] = int(s[:-1]) + if self.__current_block.popleft() != '-'*79: + raise fdb.ParseError("Separator '-'*79 line expected") + def _parse_blr_statement_id(self): + line = self.__current_block[0].strip() + if line.startswith('Statement ') and line[-1] == ':': + pad, s = self.__current_block.popleft().split() + self.__event_values['statement_id'] = int(s[:-1]) + else: + self.__event_values['statement_id'] = None + def _parse_blrdyn_content(self): + if self.__current_block[0] == '-'*79: + self.__current_block.popleft() + content = [] + line = self.__current_block.popleft() + while line and not self._is_blr_perf_start(line): + content.append(line) + if self.__current_block: + line = self.__current_block.popleft() + else: + line = None + if line: + self.__current_block.appendleft(line) + self.__event_values['content'] = '\n'.join(content) + else: + self.__event_values['content'] = None + def _parse_prepare_time(self): + if self.__current_block and self.__current_block[-1].endswith(' ms'): + run_time = self.__current_block.pop() + time, measure = run_time.split() + self.__event_values['prepare_time'] = int(time) + else: + self.__event_values['prepare_time'] = None + def _parse_sql_statement(self): + if not self.__current_block: + return + line = self.__current_block.popleft() + sql = [] + while line and not (self._is_plan_separator(line) or self._is_perf_start(line) or self._is_param_start(line)): + sql.append(line) + if self.__current_block: + line = self.__current_block.popleft() + else: + line = None + if line: + self.__current_block.appendleft(line) + self.__event_values['sql'] = '\n'.join(sql) + def _parse_plan(self): + if not self.__current_block: + return + line = self.__current_block.popleft() + if self._is_perf_start(line): + self.__current_block.appendleft(line) + return + if self._is_param_start(line): + self.__current_block.appendleft(line) + return + if not self._is_plan_separator(line): + raise fdb.ParseError("Separator '^'*79 line expected") + line = self.__current_block.popleft() + plan = [] + while line and not (self._is_perf_start(line) or self._is_param_start(line)): + plan.append(line) + if self.__current_block: + line = self.__current_block.popleft() + else: + line = None + if line: + self.__current_block.appendleft(line) + self.__event_values['plan'] = '\n'.join(plan) + def _parse_parameters(self, for_procedure=False): + parameters = [] + while self.__current_block and self.__current_block[0].startswith('param'): + line = self.__current_block.popleft() + param_id, param_def = line.split(' = ') + param_type, param_value = param_def.rsplit(',', 1) + param_value = param_value.strip(' "') + if param_value == '': + param_value = None + elif param_type in ['smallint', 'integer', 'bigint']: + param_value = int(param_value) + elif param_type == 'timestamp': + param_value = datetime.datetime.strptime(param_value, 
'%Y-%m-%dT%H:%M:%S.%f') + elif param_type == 'date': + param_value = datetime.datetime.strptime(param_value, '%Y-%m-%d') + elif param_type == 'time': + param_value = datetime.datetime.strptime(param_value, '%H:%M:%S.%f') + elif param_type in ['float', 'double precision']: + param_value = decimal.Decimal(param_value) + parameters.append((param_type, param_value,)) + while self.__current_block and self.__current_block[0].endswith('more arguments skipped...'): + self.__current_block.popleft() + # + param_id = None + if len(parameters) > 0: + key = tuple(parameters) + if key in self.param_map: + param_id = self.param_map[key] + else: + param_id = self.next_param_id + self.next_param_id += 1 + self.param_map[key] = param_id + self.__buffer.append(ParamInfo(**{'par_id': param_id, 'params': parameters})) + # + self.__event_values['param_id'] = param_id + def _parse_performance(self): + self.__event_values['run_time'] = None + self.__event_values['reads'] = None + self.__event_values['writes'] = None + self.__event_values['fetches'] = None + self.__event_values['marks'] = None + self.__event_values['access'] = None + if not self.__current_block: + return + if 'records fetched' in self.__current_block[0]: + line = self.__current_block.popleft() + self.__event_values['records'] = int(line.split()[0]) + values = self.__current_block.popleft().split(',') + while values: + value, val_type = values.pop().split() + if 'ms' in val_type: + self.__event_values['run_time'] = int(value) + elif 'read' in val_type: + self.__event_values['reads'] = int(value) + elif 'write' in val_type: + self.__event_values['writes'] = int(value) + elif 'fetch' in val_type: + self.__event_values['fetches'] = int(value) + elif 'mark' in val_type: + self.__event_values['marks'] = int(value) + else: + raise fdb.ParseError("Unhandled performance parameter %s" % val_type) + if self.__current_block: + self.__event_values['access'] = [] + if self.__current_block.popleft() != "Table Natural Index Update Insert Delete Backout Purge Expunge": + raise fdb.ParseError("Performance table header expected") + if self.__current_block.popleft() != "*"*111: + raise fdb.ParseError("Performance table header separator expected") + while self.__current_block: + entry = self.__current_block.popleft() + self.__event_values['access'].append(AccessTuple._make((intern(entry[:32].strip()), + utils.safe_int(entry[32:41].strip()), + utils.safe_int(entry[41:51].strip()), + utils.safe_int(entry[51:61].strip()), + utils.safe_int(entry[61:71].strip()), + utils.safe_int(entry[71:81].strip()), + utils.safe_int(entry[81:91].strip()), + utils.safe_int(entry[91:101].strip()), + utils.safe_int(entry[101:111].strip())))) + def _parse_sql_info(self): + plan = self.__event_values['plan'] + sql = self.__event_values['sql'] + key = (sql, plan) + # + if key in self.sqlinfo_map: + sql_id = self.sqlinfo_map[key] + else: + sql_id = self.next_sql_id + self.next_sql_id += 1 + self.sqlinfo_map[key] = sql_id + self.__buffer.append(SQLInfo(**{'sql_id': sql_id, 'sql': sql, 'plan': plan,})) + # + del self.__event_values['plan'] + del self.__event_values['sql'] + self.__event_values['sql_id'] = sql_id + def _parse_trigger(self): + trigger, event = self.__current_block.popleft().split('(') + if ' FOR ' in trigger: + a, b = trigger.split(' FOR ') + self.__event_values['trigger'] = a + self.__event_values['table'] = b.strip() + else: + self.__event_values['trigger'] = trigger.strip() + self.__event_values['table'] = None + self.__event_values['event'] = event.strip('()') + def 
_parse_service(self): + line = self.__current_block.popleft() + if 'service_mgr' not in line: + raise fdb.ParseError("Service connection description expected.") + pad, sep, s = line.partition(' (') + svc_id, user, protocol_address, remote_process_id = s.strip('()').split(',') + pad, svc_id = svc_id.split(' ') + svc_id = int(svc_id if svc_id.startswith('0x') else '0x%s' % svc_id, 0) + if svc_id not in self.seen_services: + svc_values = {} + svc_values['service_id'] = svc_id + svc_values['user'] = user.strip() + protocol_address = protocol_address.strip() + if protocol_address == '': + protocol = address = protocol_address + else: + protocol, address = protocol_address.split(':') + svc_values['protocol'] = protocol + svc_values['address'] = address + remote_process_id = remote_process_id.strip() + remote_process, remote_pid = remote_process_id.rsplit(':', 1) + svc_values['remote_process'] = remote_process + svc_values['remote_pid'] = int(remote_pid) + self.__buffer.append(ServiceInfo(**svc_values)) + self.seen_services.add(svc_id) + self.__event_values['service_id'] = svc_id + def _parse_sweep_attachment(self): + att_values = {} + self._parse_attachment_info(att_values) + self.__event_values['attachment_id'] = att_values['attachment_id'] + #values = {'remote_process': None, 'remote_pid': None,} + #line = self.__current_block.popleft() + #database, sep, attachment = line.partition(' (') + #values['database'] = database + #attachment_id, user_role, charset, protocol_address = attachment.strip('()').split(',') + #pad, s = attachment_id.split('_') + #self.__event_values['attachment_id'] = values['attachment_id'] = int(s) + #values['charset'] = charset.strip() + ## + #protocol_address = protocol_address.strip() + #if protocol_address == '': + #protocol = address = protocol_address + #else: + #protocol, address = protocol_address.split(':') + #values['protocol'] = protocol + #values['address'] = address + #if ':' in user_role: + #a, b = user_role.strip().split(':') + #else: + #a = user_role.strip() + #b = 'NONE' + #values['user'] = a + #values['role'] = b + #if values['attachment_id'] not in self.seen_attachments: + #self.__writer.write(AttachmentInfo(**values)) + #self.seen_attachments.add(values['attachment_id']) + def _parse_sweep_tr_counters(self): + line = self.__current_block.popleft() + if not line: + line = self.__current_block.popleft() + if 'Transaction counters:' not in line: + raise fdb.ParseError("Transaction counters expected") + while len(self.__current_block) > 0: + line = self.__current_block.popleft() + if 'Oldest interesting' in line: + self.__event_values['oit'] = int(line.rsplit(' ', 1)[1]) + elif 'Oldest active' in line: + self.__event_values['oat'] = int(line.rsplit(' ', 1)[1]) + elif 'Oldest snapshot' in line: + self.__event_values['ost'] = int(line.rsplit(' ', 1)[1]) + elif 'Next transaction' in line: + self.__event_values['next'] = int(line.rsplit(' ', 1)[1]) + elif 'ms' in line and len(self.__current_block) == 0: + # Put back performance counters + self.__current_block.appendleft(line) + break + def __parse_trace_header(self): + self.__last_timestamp, status, self.__current_event = self._parse_header(self.__current_block.popleft()) + self.__event_values['event_id'] = self.next_event_id + self.next_event_id += 1 + self.__event_values['status'] = status + self.__event_values['timestamp'] = self.__last_timestamp + def __parser_trace_suspend(self): + # Session was suspended because log was full, so we will create fake event to note that + line = 
self.__current_block.popleft() + self.__event_values['timestamp'] = self.__last_timestamp + self.__event_values['event_id'] = self.next_event_id + session_name = line[4:line.find(' is suspended')] + self.__event_values['session_name'] = session_name.replace(' ', '_').upper() + self.next_event_id += 1 + return EventTraceSuspend(**self.__event_values) + def __parser_trace_init(self): + self.__parse_trace_header() + del self.__event_values['status'] + self.__event_values['session_name'] = self.__current_block.popleft() + return EventTraceInit(**self.__event_values) + def __parser_trace_finish(self): + self.__parse_trace_header() + del self.__event_values['status'] + self.__event_values['session_name'] = self.__current_block.popleft() + return EventTraceFinish(**self.__event_values) + def __parser_start_transaction(self): + self.__parse_trace_header() + # Attachment + values = {} + self._parse_attachment_info(values) + self.__event_values['attachment_id'] = values['attachment_id'] + # Transaction parameters + self._parse_transaction_info(self.__event_values, check=False) + return EventTransactionStart(**self.__event_values) + def __parser_commit_transaction(self): + self.__parse_trace_header() + # Attachment + values = {} + self._parse_attachment_info(values) + self.__event_values['attachment_id'] = values['attachment_id'] + # Transaction parameters + self._parse_transaction_info(self.__event_values, check=False) + self._parse_transaction_performance() + return EventCommit(**self.__event_values) + def __parser_rollback_transaction(self): + self.__parse_trace_header() + # Attachment + values = {} + self._parse_attachment_info(values) + self.__event_values['attachment_id'] = values['attachment_id'] + # Transaction parameters + self._parse_transaction_info(self.__event_values, check=False) + self._parse_transaction_performance() + return EventRollback(**self.__event_values) + def __parser_commit_retaining(self): + self.__parse_trace_header() + # Attachment + values = {} + self._parse_attachment_info(values) + self.__event_values['attachment_id'] = values['attachment_id'] + # Transaction parameters + self._parse_transaction_info(self.__event_values, check=False) + self._parse_transaction_performance() + return EventCommitRetaining(**self.__event_values) + def __parser_rollback_retaining(self): + self.__parse_trace_header() + # Attachment + values = {} + self._parse_attachment_info(values) + self.__event_values['attachment_id'] = values['attachment_id'] + # Transaction parameters + self._parse_transaction_info(self.__event_values, check=False) + self._parse_transaction_performance() + return EventRollbackRetaining(**self.__event_values) + def __parser_prepare_statement(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + self._parse_statement_id() + self._parse_prepare_time() + self._parse_sql_statement() + self._parse_plan() + self._parse_sql_info() + return EventPrepareStatement(**self.__event_values) + def __parser_execute_statement_start(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + self._parse_statement_id() + self._parse_sql_statement() + self._parse_plan() + self._parse_parameters() + self._parse_sql_info() + return EventStatementStart(**self.__event_values) + def __parser_execute_statement_finish(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + self._parse_statement_id() + self._parse_sql_statement() + self._parse_plan() + self._parse_parameters() + self.__event_values['records'] = None + 
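+        # Note: 'records' stays None unless _parse_performance() finds a
+        # '... records fetched' line at the start of the current block.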
self._parse_performance() + self._parse_sql_info() + return EventStatementFinish(**self.__event_values) + def __parser_free_statement(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + self._parse_statement_id() + self._parse_sql_statement() + self._parse_plan() + self._parse_sql_info() + del self.__event_values['status'] + return EventFreeStatement(**self.__event_values) + def __parser_close_cursor(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + self._parse_statement_id() + self._parse_sql_statement() + self._parse_plan() + self._parse_sql_info() + del self.__event_values['status'] + return EventCloseCursor(**self.__event_values) + def __parser_trigger_start(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + self._parse_trigger() + return EventTriggerStart(**self.__event_values) + def __parser_trigger_finish(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + self._parse_trigger() + self._parse_performance() + return EventTriggerFinish(**self.__event_values) + def __parser_procedure_start(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + pad, s = self.__current_block.popleft().split() + self.__event_values['procedure'] = s[:-1] + self._parse_parameters(for_procedure=True) + return EventProcedureStart(**self.__event_values) + def __parser_procedure_finish(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + pad, s = self.__current_block.popleft().split() + self.__event_values['procedure'] = s[:-1] + self._parse_parameters(for_procedure=True) + self._parse_performance() + return EventProcedureFinish(**self.__event_values) + def __parser_create_db(self): + self.__parse_trace_header() + # Attachment parameters + self._parse_attachment_info(self.__event_values, check=False) + return EventCreate(**self.__event_values) + def __parser_drop_db(self): + self.__parse_trace_header() + # Attachment parameters + self._parse_attachment_info(self.__event_values, check=False) + return EventDrop(**self.__event_values) + def __parser_attach(self): + self.__parse_trace_header() + # Attachment parameters + self._parse_attachment_info(self.__event_values, check=False) + #self.__event_values['unauthorized'] = False + return EventAttach(**self.__event_values) + def __parser_detach(self): + self.__parse_trace_header() + # Attachment parameters + self._parse_attachment_info(self.__event_values, check=False) + return EventDetach(**self.__event_values) + def __parser_service_start(self): + self.__parse_trace_header() + self._parse_service() + # service parameters + action = self.__current_block.popleft().strip('"') + self.__event_values['action'] = action + parameters = [] + while len(self.__current_block) > 0: + parameters.append(self.__current_block.popleft()) + self.__event_values['parameters'] = parameters + # + return EventServiceStart(**self.__event_values) + def __parser_service_attach(self): + self.__parse_trace_header() + self._parse_service() + return EventServiceAttach(**self.__event_values) + def __parser_service_detach(self): + self.__parse_trace_header() + self._parse_service() + return EventServiceDetach(**self.__event_values) + def __parser_service_query(self): + self.__parse_trace_header() + self._parse_service() + # service parameters + line = self.__current_block.popleft().strip() + if line[0] == '"' and line[-1] == '"': + action = line.strip('"') + self.__event_values['action'] = action + else: + 
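+            # No surrounding double quotes means this entry carries no action;
+            # record it as None.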
self.__event_values['action'] = None + parameters = [] + while len(self.__current_block) > 0: + parameters.append(self.__current_block.popleft()) + self.__event_values['parameters'] = parameters + # + return EventServiceQuery(**self.__event_values) + def __parser_set_context(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + line = self.__current_block.popleft() + context, line = line.split(']', 1) + key, value = line.split('=', 1) + self.__event_values['context'] = context[1:] + self.__event_values['key'] = key.strip() + self.__event_values['value'] = value.strip() + del self.__event_values['status'] + return EventSetContext(**self.__event_values) + def __parser_error(self): + self.__event_values['place'] = self.__current_block[0].split(' AT ')[1] + self.__parse_trace_header() + att_values = {} + if 'service_mgr' in self.__current_block[0]: + event_class = EventServiceError + self._parse_service() + else: + event_class = EventError + self._parse_attachment_info(att_values) + self.__event_values['attachment_id'] = att_values['attachment_id'] + details = [] + while len(self.__current_block) > 0: + details.append(self.__current_block.popleft()) + self.__event_values['details'] = details + del self.__event_values['status'] + return event_class(**self.__event_values) + def __parser_warning(self): + self.__event_values['place'] = self.__current_block[0].split(' AT ')[1] + self.__parse_trace_header() + att_values = {} + if 'service_mgr' in self.__current_block[0]: + event_class = EventServiceWarning + self._parse_service() + else: + event_class = EventWarning + self._parse_attachment_info(att_values) + self.__event_values['attachment_id'] = att_values['attachment_id'] + details = [] + while len(self.__current_block) > 0: + details.append(self.__current_block.popleft()) + self.__event_values['details'] = details + del self.__event_values['status'] + return event_class(**self.__event_values) + def __parser_sweep_start(self): + self.__parse_trace_header() + self._parse_sweep_attachment() + self._parse_sweep_tr_counters() + del self.__event_values['status'] + return EventSweepStart(**self.__event_values) + def __parser_sweep_progress(self): + self.__parse_trace_header() + self._parse_sweep_attachment() + self._parse_performance() + del self.__event_values['status'] + return EventSweepProgress(**self.__event_values) + def __parser_sweep_finish(self): + self.__parse_trace_header() + self._parse_sweep_attachment() + self._parse_sweep_tr_counters() + self._parse_performance() + del self.__event_values['status'] + del self.__event_values['access'] + return EventSweepFinish(**self.__event_values) + def __parser_sweep_failed(self): + self.__parse_trace_header() + self._parse_sweep_attachment() + del self.__event_values['status'] + return EventSweepFailed(**self.__event_values) + def __parser_blr_compile(self): + self.__parse_trace_header() + # Attachment + values = {} + self._parse_attachment_info(values) + self.__event_values['attachment_id'] = values['attachment_id'] + # BLR + self._parse_blr_statement_id() + self._parse_blrdyn_content() + self._parse_prepare_time() + return EventBLRCompile(**self.__event_values) + def __parser_blr_execute(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + # BLR + self._parse_blr_statement_id() + self._parse_blrdyn_content() + self._parse_performance() + return EventBLRExecute(**self.__event_values) + def __parser_dyn_execute(self): + self.__parse_trace_header() + self._parse_attachment_and_transaction() + # 
DYN
+        self._parse_blrdyn_content()
+        value, ms = self.__current_block.popleft().split()
+        self.__event_values['run_time'] = int(value)
+        return EventDYNExecute(**self.__event_values)
+    def __parser_unknown(self):
+        items = self.__current_block[0].split()
+        self.__parse_trace_header()
+        self.__current_block.appendleft(' '.join(items[2:]))
+        del self.__event_values['status']
+        self.__event_values['data'] = '\n'.join(self.__current_block)
+        return EventUnknown(**self.__event_values)
+    def _parse_block(self, parser):
+        self.__event_values.clear()
+        result = parser()
+        return result
+    def parse_event(self, trace_block):
+        """Parse a single trace event.
+
+        :param list trace_block: List with trace entry lines for a single trace event.
+
+        :returns: Named tuple with the parsed event.
+"""
+        self.__current_block = collections.deque(trace_block)
+        if self._is_session_suspended(self.__current_block[0]):
+            record_parser = self.__parser_trace_suspend
+        else:
+            timestamp, status, trace_event = self._parse_header(self.__current_block[0])
+            record_parser = self.__parse_map[trace_event]
+        #
+        return self._parse_block(record_parser)
+    def parse(self, lines):
+        """Parse output from a Firebird trace session and yield named tuples describing individual trace log entries/events.
+
+        :param lines: Iterable that returns lines produced by a Firebird trace session.
+
+        :raises `~fdb.ParseError`: When any problem is found in the input stream.
+"""
+        for rec in (self.parse_event(x) for x in self._iter_trace_blocks(lines)):
+            while len(self.__buffer) > 0:
+                yield self.__buffer.pop(0)
+            yield rec
diff -Nru fdb-1.6.1+dfsg1/fdb/utils.py fdb-2.0.0/fdb/utils.py
--- fdb-1.6.1+dfsg1/fdb/utils.py 2014-11-13 15:09:14.000000000 +0000
+++ fdb-2.0.0/fdb/utils.py 2018-04-26 14:39:03.000000000 +0000
@@ -2,7 +2,7 @@
 #
 # PROGRAM: fdb
 # MODULE: utils.py
-# DESCRIPTION: Various utility classes and functions
+# DESCRIPTION: Python driver for Firebird - Various utility classes and functions
 # CREATED: 10.5.2013
 #
 # Software distributed under the License is distributed AS IS,
@@ -12,21 +12,40 @@
 #
 # The Original Code was created by Pavel Cisar
 #
-# Copyright (c) 2013 Pavel Cisar
+# Copyright (c) Pavel Cisar
 # and all contributors signed below.
 #
 # All Rights Reserved.
 # Contributor(s): ______________________________________.
 
-def update_meta (self, other):
+from operator import attrgetter
+
+def safe_int(str_value, base=10):
+    """Always returns an integer value from a string/None argument. Returns 0 if the argument is None.
+"""
+    if str_value:
+        return int(str_value, base)
+    else:
+        return 0
+
+def safe_str(str_value):
+    """Always returns a string value from a string/None argument.
+Returns an empty string if the argument is None.
+"""
+    if str_value is None:
+        return ''
+    else:
+        return str_value
+
+def update_meta(self, other):
     "Helper function for :class:`LateBindingProperty` class."
     self.__name__ = other.__name__
     self.__doc__ = other.__doc__
     self.__dict__.update(other.__dict__)
     return self
 
-class LateBindingProperty (property):
-    """Peroperty class that binds to getter/setter/deleter methods when **instance**
+class LateBindingProperty(property):
+    """Property class that binds to getter/setter/deleter methods when an **instance**
     of the class that defines the property is created. This allows you to override
     these methods in descendant classes (if they are not private) without the
     necessity to redeclare the property itself in the descendant class.
@@ -89,7 +108,7 @@
 e. If you inspect the property you will get back functions with the correct __name__, __doc__, etc.
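+
+    A minimal sketch of the late-binding behaviour described above (the class
+    and method names here are illustrative, not part of the library)::
+
+        class Base(object):
+            def _get_name(self):
+                return 'base'
+            name = LateBindingProperty(_get_name)
+
+        class Child(Base):
+            def _get_name(self):
+                # Overrides the getter without redeclaring the property.
+                return 'child'
+
+        print(Base().name)   # -> base
+        print(Child().name)  # -> child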
""" - def __new__(self, fget=None, fset=None, fdel=None, doc=None): + def __new__(cls, fget=None, fset=None, fdel=None, doc=None): if fget is not None: def __get__(obj, objtype=None, name=fget.__name__): fget = getattr(obj, name) @@ -110,7 +129,7 @@ class Iterator(object): """Generic iterator implementation. """ - def __init__(self, method, sentinel = None): + def __init__(self, method, sentinel=None): """ :param method: Callable without parameters that returns next item. :param sentinel: Value that when returned by `method` indicates the end @@ -137,7 +156,7 @@ """Property class that forwards calls to getter/setter/deleter methods to respective property methods of another object. This class allows you to "inject" properties from embedded object into class definition of parent object.""" - def __init__(self,obj,prop): + def __init__(self, obj, prop): """ :param string obj: Attribute name with embedded object. :param property prop: Property instance from embedded object. @@ -145,20 +164,20 @@ self.obj = obj self.prop = prop self.__doc__ = prop.__doc__ - def __get__(self,obj,objtype): + def __get__(self, obj, objtype): if obj is None: return self - return self.prop.__get__(getattr(obj,self.obj)) - def __set__(self,obj,val): - self.prop.__set__(getattr(obj,self.obj),val) - def __delete__(self,obj): - self.prop.__delete__(getattr(obj,self.obj)) + return self.prop.__get__(getattr(obj, self.obj)) + def __set__(self, obj, val): + self.prop.__set__(getattr(obj, self.obj), val) + def __delete__(self, obj): + self.prop.__delete__(getattr(obj, self.obj)) class EmbeddedAttribute(property): """Property class that gets/sets attribute of another object. This class allows you to "inject" attributes from embedded object into class definition of parent object.""" - def __init__(self,obj,attr): + def __init__(self, obj, attr): """ :param string obj: Attribute name with embedded object. :param string attr: Attribute name from embedded object. @@ -166,12 +185,12 @@ self.obj = obj self.attr = attr self.__doc__ = attr.__doc__ - def __get__(self,obj,objtype): + def __get__(self, obj, objtype): if obj is None: return self - return getattr(getattr(obj,self.obj),self.attr) - def __set__(self,obj,val): - setattr(getattr(obj,self.obj),self.attr,val) + return getattr(getattr(obj, self.obj), self.attr) + def __set__(self, obj, val): + setattr(getattr(obj, self.obj), self.attr, val) def iter_class_properties(cls): """Iterator that yields `name, property` pairs for all properties in class. @@ -188,23 +207,433 @@ :param class cls: Class object.""" for varname in vars(cls): value = getattr(cls, varname) - if not (isinstance(value, property) or callable(value)): + if not (isinstance(value, property) or callable(value)) and not varname.startswith('_'): yield varname -def embed_attributes(from_class,attr): - """Class decorator that injects properties and non-callable attributes -from another class instance embedded in class instances. +def embed_attributes(from_class, attr): + """Class decorator that injects properties and attributes +from another class instance embedded in class instances. Only attributes and properties that are not +already defined in decorated class are injected. :param class from_class: Class that should extend decorated class. 
 :param string attr: Attribute name that holds the instance of the embedded class within the decorated class instance."""
     def d(class_):
-        for pname,prop in iter_class_properties(from_class):
-            if not hasattr(class_,pname):
-                setattr(class_,pname,EmbeddedProperty(attr,prop))
+        for pname, prop in iter_class_properties(from_class):
+            if not hasattr(class_, pname):
+                setattr(class_, pname, EmbeddedProperty(attr, prop))
         for attrname in iter_class_variables(from_class):
-            if not hasattr(class_,attrname):
-                setattr(class_,attrname,EmbeddedAttribute(attr,attrname))
+            if not hasattr(class_, attrname):
+                setattr(class_, attrname, EmbeddedAttribute(attr, attrname))
         return class_
     return d
+
+def make_lambda(expr, params='item', context=None):
+    """Make a lambda function from an expression string.
+
+    .. versionadded:: 2.0
+"""
+    # Note: the expression is compiled with eval(); use only with trusted input.
+    if context:
+        return eval('lambda %s:%s' % (params, expr), context)
+    else:
+        return eval('lambda %s:%s' % (params, expr))
+
+class ObjectList(list):
+    """List of objects with additional functionality.
+
+    .. versionadded:: 2.0
+"""
+    def __init__(self, items=None, _cls=None, key_expr=None):
+        """
+        :param iterable items: Sequence to initialize the collection.
+        :param _cls: Class or list/tuple of classes. Only instances of these classes are allowed in the collection.
+        :param str key_expr: Key expression. Must contain an item reference as `item`, for example `item.attribute_name`.
+
+        :raises TypeError: When the initialization sequence contains an invalid instance.
+        """
+        if items:
+            super(ObjectList, self).__init__(items)
+        else:
+            super(ObjectList, self).__init__()
+        self.__key_expr = key_expr
+        self.__frozen = False
+        self._cls = _cls
+        self.__map = None
+        # Validate initial items, so the documented TypeError is actually raised.
+        for item in self:
+            self.__check_value(item)
+    def __check_value(self, value):
+        if self._cls and not isinstance(value, self._cls):
+            raise TypeError("Value is not an instance of allowed class")
+    def __check_mutability(self):
+        if self.__frozen:
+            raise TypeError("list is frozen")
+    def __setitem__(self, index, value):
+        self.__check_mutability()
+        self.__check_value(value)
+        super(ObjectList, self).__setitem__(index, value)
+    def __setslice__(self, i, j, y):
+        self.__check_mutability()
+        super(ObjectList, self).__setslice__(i, j, y)
+    def __delitem__(self, index):
+        self.__check_mutability()
+        super(ObjectList, self).__delitem__(index)
+    def __delslice__(self, i, j):
+        self.__check_mutability()
+        super(ObjectList, self).__delslice__(i, j)
+    def insert(self, index, item):
+        """Insert item before index.
+
+        :raises TypeError: When the list is frozen or the item is not an instance of an allowed class"""
+        self.__check_mutability()
+        self.__check_value(item)
+        super(ObjectList, self).insert(index, item)
+    def append(self, item):
+        """Add an item to the end of the list.
+
+        :raises TypeError: When the list is frozen or the item is not an instance of an allowed class"""
+        self.__check_mutability()
+        self.__check_value(item)
+        super(ObjectList, self).append(item)
+    def extend(self, iterable):
+        """Extend the list by appending all the items from the given iterable.
+
+        :raises TypeError: When the list is frozen or an item is not an instance of an allowed class"""
+        for item in iterable:
+            self.append(item)
+    def sort(self, attrs=None, expr=None, reverse=False):
+        """Sort items in-place, optionally using attribute values as the key or a key expression.
+
+        :param list attrs: List of attribute names.
+        :param expr: Key expression, a callable accepting one parameter or an expression as a string referencing the list item as `item`.
+
+        .. important::
+
+            Only one parameter (`attrs` or `expr`) can be specified.
+            If neither is given, the default list ordering is used.
+
+        :raises TypeError: When the list is frozen.
+
+        Examples::
+
+            sort(attrs=['name','degree'])        # Sort by item.name, item.degree
+            sort(expr=lambda x: x.name.upper())  # Sort by upper item.name
+            sort(expr='item.name.upper()')       # Sort by upper item.name
+        """
+        self.__check_mutability()
+        if attrs:
+            super(ObjectList, self).sort(key=attrgetter(*attrs), reverse=reverse)
+        elif expr:
+            super(ObjectList, self).sort(key=expr if callable(expr) else make_lambda(expr), reverse=reverse)
+        else:
+            super(ObjectList, self).sort(reverse=reverse)
+    def reverse(self):
+        """Reverse the elements of the list, in place.
+
+        :raises TypeError: When the list is frozen."""
+        self.__check_mutability()
+        super(ObjectList, self).reverse()
+    def clear(self):
+        """Remove all items from the list.
+
+        :raises TypeError: When the list is frozen."""
+        self.__check_mutability()
+        while len(self) > 0:
+            del self[0]
+    def freeze(self):
+        """Set the list to an immutable (frozen) state, and build the fast lookup map for the default key expression (used by :meth:`get`)."""
+        self.__frozen = True
+        if self.__key_expr:
+            fce = make_lambda(self.__key_expr)
+            self.__map = dict(((key, index) for index, key in enumerate((fce(item) for item in self))))
+    def filter(self, expr):
+        """Return a new ObjectList of items for which `expr` is evaluated as True.
+
+        :param expr: Boolean expression, a callable accepting one parameter or an expression as a string referencing the list item as `item`.
+
+        Example::
+
+            filter(lambda x: x.name.startswith("ABC"))
+            filter('item.name.startswith("ABC")')
+"""
+        return ObjectList(self.ifilter(expr), self._cls, self.__key_expr)
+    def ifilter(self, expr):
+        """Return a generator that yields items for which `expr` is evaluated as True.
+
+        :param expr: Boolean expression, a callable accepting one parameter or an expression as a string referencing the list item as `item`.
+
+        Example::
+
+            ifilter(lambda x: x.name.startswith("ABC"))
+            ifilter('item.name.startswith("ABC")')
+"""
+        fce = expr if callable(expr) else make_lambda(expr)
+        return (item for item in self if fce(item))
+    def ifilterfalse(self, expr):
+        """Return a generator that yields items for which `expr` is evaluated as False.
+
+        :param expr: Boolean expression, a callable accepting one parameter or an expression as a string referencing the list item as `item`.
+
+        Example::
+
+            ifilterfalse(lambda x: x.name.startswith("ABC"))
+            ifilterfalse('item.name.startswith("ABC")')
+"""
+        fce = expr if callable(expr) else make_lambda(expr)
+        return (item for item in self if not fce(item))
+    def report(self, *args):
+        """Return a list of data produced by expression(s) evaluated on list items.
+
+        The parameter(s) can be one of:
+
+        - A callable accepting one parameter and returning data for output
+        - One or more expressions as strings referencing the item as `item`.
+
+        Examples::
+
+            # returns list of tuples with item.name and item.size
+
+            report(lambda x: (x.name, x.size))
+            report('item.name','item.size')
+
+            # returns list of item names
+
+            report(lambda x: x.name)
+            report('item.name')
+"""
+        if len(args) == 1 and callable(args[0]):
+            fce = args[0]
+        else:
+            attrs = "(%s)" % ",".join(args) if len(args) > 1 else args[0]
+            fce = make_lambda(attrs)
+        return [fce(item) for item in self]
+    def ireport(self, *args):
+        """Return a generator that yields data produced by expression(s) evaluated on list items.
+
+        The parameter(s) can be one of:
+
+        - A callable accepting one parameter and returning data for output
+        - One or more expressions as strings referencing the item as `item`.
+
+        Examples::
+
+            # generator of tuples with item.name and item.size
+
+            ireport(lambda x: (x.name, x.size))
+            ireport('item.name','item.size')
+
+            # generator of item names
+
+            ireport(lambda x: x.name)
+            ireport('item.name')
+"""
+        if len(args) == 1 and callable(args[0]):
+            fce = args[0]
+        else:
+            attrs = "(%s)" % ",".join(args) if len(args) > 1 else args[0]
+            fce = make_lambda(attrs)
+        return (fce(item) for item in self)
+    def ecount(self, expr):
+        """Return the number of items for which `expr` is evaluated as True.
+
+        :param expr: Boolean expression, a callable accepting one parameter or an expression as a string referencing the list item as `item`.
+
+        Example::
+
+            ecount(lambda x: x.name.startswith("ABC"))
+            ecount('item.name.startswith("ABC")')
+"""
+        return sum(1 for item in self.ifilter(expr))
+    def split(self, expr):
+        """Return two new ObjectLists, the first with items for which `expr` is evaluated as True and the second with those for which it is evaluated as False.
+
+        :param expr: Boolean expression, a callable accepting one parameter or an expression as a string referencing the list item as `item`.
+
+        Example::
+
+            split(lambda x: x.size > 100)
+            split('item.size > 100')
+"""
+        return ObjectList(self.ifilter(expr), self._cls, self.__key_expr), ObjectList(self.ifilterfalse(expr), self._cls, self.__key_expr)
+    def extract(self, expr):
+        """Move items for which `expr` is evaluated as True into a new ObjectList.
+
+        :param expr: Boolean expression, a callable accepting one parameter or an expression as a string referencing the list item as `item`.
+
+        :raises TypeError: When the list is frozen.
+
+        Example::
+
+            extract(lambda x: x.name.startswith("ABC"))
+            extract('item.name.startswith("ABC")')
+"""
+        self.__check_mutability()
+        fce = expr if callable(expr) else make_lambda(expr)
+        result = ObjectList(_cls=self._cls, key_expr=self.__key_expr)
+        i = 0
+        while len(self) > i:
+            item = self[i]
+            if fce(item):
+                result.append(item)
+                del self[i]
+            else:
+                i += 1
+        return result
+    def get(self, value, expr=None):
+        """Return the item with the given key value using the default or a specified key expression, or None if there is no such item.
+
+        Uses a very fast map lookup for the default key expression when the list is `frozen`; otherwise it falls back to slower list traversal.
+
+        :param value: Searched value.
+        :param expr: Key value expression, a callable accepting two parameters (item,value) or an expression as a string referencing the list item as `item`.
+
+        :raises TypeError: If the key expression is not defined.
+
+        Examples::
+
+            # Search using default key expression
+            get('ITEM_NAME')
+            # Search using callable key expression
+            get('ITEM_NAME', lambda item, value: item.name.upper() == value)
+            # Search using string key expression
+            get('ITEM_NAME','item.name.upper()')
+"""
+        if self.__map and not expr:
+            i = self.__map.get(value)
+            return self[i] if i is not None else None
+        if not (self.__key_expr or expr):
+            raise TypeError("Key expression required")
+        if callable(expr):
+            fce = expr
+        else:
+            s = '%s == value' % (self.__key_expr if expr is None else expr)
+            fce = make_lambda(s, 'item,value')
+        for item in self:
+            if fce(item, value):
+                return item
+        return None
+    def contains(self, value, expr=None):
+        """Return True if the list has any item with the default or a specified key expression equal to the given value.
+
+        :param value: Tested key value.
+        :param expr: Key value expression, a callable accepting two parameters (item,value) or an expression as a string referencing the list item as `item`.
+
+        Examples::
+
+            # Search using default key expression
+            contains('ITEM_NAME')
+            # Search using callable key expression
+            contains('ITEM_NAME', lambda item, value: item.name.upper() == value)
+            # Search using string key expression
+            contains('ITEM_NAME','item.name.upper()')
+"""
+        return self.get(value, expr) is not None
+    def all(self, expr):
+        """Return True if `expr` is evaluated as True for all list elements.
+
+        :param expr: Boolean expression, a callable accepting one parameter or an expression as a string referencing the list item as `item`.
+
+        Example::
+
+            all(lambda x: x.name.startswith("ABC"))
+            all('item.name.startswith("ABC")')
+"""
+        fce = expr if callable(expr) else make_lambda(expr)
+        for item in self:
+            if not fce(item):
+                return False
+        return True
+    def any(self, expr):
+        """Return True if `expr` is evaluated as True for any list element.
+
+        :param expr: Boolean expression, a callable accepting one parameter or an expression as a string referencing the list item as `item`.
+
+        Example::
+
+            any(lambda x: x.name.startswith("ABC"))
+            any('item.name.startswith("ABC")')
+"""
+        fce = expr if callable(expr) else make_lambda(expr)
+        for item in self:
+            if fce(item):
+                return True
+        return False
+    #
+    frozen = property(fget=lambda self: self.__frozen, doc='True if list is immutable')
+    key = property(fget=lambda self: self.__key_expr, doc='Key expression')
+
+class Visitable(object):
+    """Base class for Visitor Pattern support.
+
+    .. versionadded:: 2.0
+"""
+    def accept(self, visitor):
+        """Visitor Pattern support. Calls `visit(self)` on the parameter object.
+
+        :param visitor: Visitor object of the Visitor Pattern.
+        """
+        visitor.visit(self)
+
+class Visitor(object):
+    """Base class for Visitor Pattern visitors.
+
+    .. versionadded:: 2.0
+
+    Descendants may implement methods that handle individual object types, following the naming pattern `visit_<class_name>`.
+    Calls :meth:`default_action` if no appropriate special method is defined.
+
+    .. important::
+
+        This implementation uses the Python Method Resolution Order (__mro__) to find the special handling method, so the
+        special method for a given class is also used for its descendants.
+
+    Example::
+
+        class Node(object): pass
+        class A(Node): pass
+        class B(Node): pass
+        class C(A,B): pass
+
+        class MyVisitor(Visitor):
+            def default_action(self, obj):
+                print('default_action ' + obj.__class__.__name__)
+
+            def visit_B(self, obj):
+                print('visit_B ' + obj.__class__.__name__)
+
+        a = A()
+        b = B()
+        c = C()
+        visitor = MyVisitor()
+        visitor.visit(a)
+        visitor.visit(b)
+        visitor.visit(c)
+
+    Will create output::
+
+        default_action A
+        visit_B B
+        visit_B C
+"""
+    def visit(self, obj):
+        """Dispatch to the method that handles `obj`.
+
+        First traverses `obj.__class__.__mro__` trying to find a method whose name follows the `visit_<class_name>` pattern,
+        and calls it with `obj`. Otherwise it calls :meth:`default_action`.
+
+        :param object obj: Object to be handled by the visitor.
+"""
+        meth = None
+        for cls in obj.__class__.__mro__:
+            meth = getattr(self, 'visit_'+cls.__name__, None)
+            if meth:
+                break
+        if not meth:
+            meth = self.default_action
+        return meth(obj)
+    def default_action(self, obj):
+        """Default handler for visited objects.
+
+        :param object obj: Object to be handled.
+
+        .. note:: This implementation does nothing!
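+
+        For illustration, a descendant might override it to report objects that
+        have no specialized handler (a sketch; the class name is hypothetical)::
+
+            class LoggingVisitor(Visitor):
+                def default_action(self, obj):
+                    print('no visit_* handler for ' + obj.__class__.__name__)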
+""" + pass diff -Nru fdb-1.6.1+dfsg1/fdb.egg-info/entry_points.txt fdb-2.0.0/fdb.egg-info/entry_points.txt --- fdb-1.6.1+dfsg1/fdb.egg-info/entry_points.txt 2016-11-30 14:10:22.000000000 +0000 +++ fdb-2.0.0/fdb.egg-info/entry_points.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ - - \ No newline at end of file diff -Nru fdb-1.6.1+dfsg1/fdb.egg-info/not-zip-safe fdb-2.0.0/fdb.egg-info/not-zip-safe --- fdb-1.6.1+dfsg1/fdb.egg-info/not-zip-safe 2014-11-13 14:55:44.000000000 +0000 +++ fdb-2.0.0/fdb.egg-info/not-zip-safe 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - diff -Nru fdb-1.6.1+dfsg1/fdb.egg-info/PKG-INFO fdb-2.0.0/fdb.egg-info/PKG-INFO --- fdb-1.6.1+dfsg1/fdb.egg-info/PKG-INFO 2016-11-30 14:10:22.000000000 +0000 +++ fdb-2.0.0/fdb.egg-info/PKG-INFO 2018-04-27 11:40:46.000000000 +0000 @@ -1,19 +1,89 @@ -Metadata-Version: 1.1 +Metadata-Version: 2.1 Name: fdb -Version: 1.6.1 -Summary: Firebird RDBMS bindings for Python. +Version: 2.0.0 +Summary: The Python driver for Firebird Home-page: http://www.firebirdsql.org/en/devel-python-driver/ -Author: Pavel Cisar +Author: Pavel Císař Author-email: pcisar@users.sourceforge.net -License: BSD -Description: fdb package is a set of Firebird RDBMS bindings for python. - It works on Python 2.6+ and Python 3.x. +License: UNKNOWN +Project-URL: Funding, https://www.firebirdsql.org/en/donate/ +Project-URL: Documentation, http://fdb2.readthedocs.io/en/latest/ +Project-URL: Bug Reports, http://tracker.firebirdsql.org/browse/PYFB +Project-URL: Say Thanks!, https://saythanks.io/to/pcisar +Project-URL: Source, https://github.com/FirebirdSQL/fdb +Description: ==================================== + FDB - The Python driver for Firebird + ==================================== + |docs| || Home_ || `Bug Reports`_ || Source_ || `Say Thanks!`_ + + FDB is a `Python`_ library package that implements `Python Database API 2.0`_-compliant support for the open source relational + database `Firebird`_ ®. In addition to the minimal feature set of the standard Python DB API, FDB also exposes the entire native + (old-style) client API of the database engine. Notably: + + * Automatic data conversion from strings on input. + * Automatic input/output conversions of textual data between UNICODE and database character sets. + * Support for prepared SQL statements. + * Multiple independent transactions per single connection. + * All transaction parameters that Firebird supports, including table access specifications. + * Distributed transactions. + * Firebird BLOB support, including support for stream BLOBs. + * Support for Firebird Events. + * Support for Firebird ARRAY data type. + * Support for all Firebird Services + + FDB also contains extensive collection of submodules that simplify various Firebird-related tasks. Notably: + + * Database schema + * Firebird monitoring tables + * Parsing Firebird trace & audit logs + * Parsing Firebird server log + * Parsing Firebird gstat utility output + + FDB is implemented on top of Firebird client library using ctypes, and currently uses only traditional Firebird API. + + FDB works with Firebird 2.0 and newer, and Python 2.7 and 3.4+. + + FDB is free – covered by a permissive BSD-style license that both commercial and noncommercial users should find agreeable. + + FDB is replacement for discontinued KInterbasDB library, and as such it's designed to be as much compatible + with KInterbasDB as possible, but there are some differences. See FDB documentation for full description + of these differences. + + |donate| + + .. 
_Python: http://python.org + .. _Python Database API 2.0: http://www.python.org/dev/peps/pep-0249/ + .. _Firebird: http://www.firebirdsql.org + .. _Bug Reports: http://tracker.firebirdsql.org/browse/PYFB + .. _Home: http://www.firebirdsql.org/en/devel-python-driver/ + .. _Source: https://github.com/FirebirdSQL/fdb + .. _Say Thanks!: https://saythanks.io/to/pcisar + + .. |donate| image:: https://www.firebirdsql.org/img/donate/donate_to_firebird.gif + :alt: Contribute to the development + :scale: 100% + :target: https://www.firebirdsql.org/en/donate/ + + .. |docs| image:: https://readthedocs.org/projects/fdb/badge/?version=v2.0 + :alt: Documentation Status + :scale: 100% + :target: http://fdb.readthedocs.io/en/v2.0/ Keywords: Firebird Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS Classifier: Topic :: Database +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4 +Description-Content-Type: text/x-rst diff -Nru fdb-1.6.1+dfsg1/fdb.egg-info/requires.txt fdb-2.0.0/fdb.egg-info/requires.txt --- fdb-1.6.1+dfsg1/fdb.egg-info/requires.txt 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/fdb.egg-info/requires.txt 2018-04-27 11:40:46.000000000 +0000 @@ -0,0 +1 @@ +future>=0.16.0 diff -Nru fdb-1.6.1+dfsg1/fdb.egg-info/SOURCES.txt fdb-2.0.0/fdb.egg-info/SOURCES.txt --- fdb-1.6.1+dfsg1/fdb.egg-info/SOURCES.txt 2016-11-30 14:10:22.000000000 +0000 +++ fdb-2.0.0/fdb.egg-info/SOURCES.txt 2018-04-27 11:40:46.000000000 +0000 @@ -1,87 +1,40 @@ +.gitignore LICENSE.TXT -MANIFEST.in -PKG-INFO -README +README.rst +requirements.txt setup.cfg setup.py -docs/.buildinfo -docs/changelog.html -docs/differences-from-kdb.html -docs/genindex.html -docs/getting-started.html -docs/index.html -docs/license.html -docs/listinv.py -docs/objects.inv -docs/py-modindex.html -docs/python-db-api-compliance.html -docs/reference.html -docs/requirements.html -docs/search.html -docs/searchindex.js -docs/usage-guide.html -docs/_sources/changelog.txt -docs/_sources/differences-from-kdb.txt -docs/_sources/getting-started.txt -docs/_sources/index.txt -docs/_sources/license.txt -docs/_sources/python-db-api-compliance.txt -docs/_sources/reference.txt -docs/_sources/usage-guide.txt -docs/_static/ajax-loader.gif -docs/_static/basic.css -docs/_static/comment-bright.png -docs/_static/comment-close.png -docs/_static/comment.png -docs/_static/dialog-note.png -docs/_static/dialog-seealso.png -docs/_static/dialog-topic.png -docs/_static/dialog-warning.png -docs/_static/doctools.js -docs/_static/down-pressed.png -docs/_static/down.png -docs/_static/epub.css -docs/_static/fdbtheme.css -docs/_static/file.png -docs/_static/footerbg.png -docs/_static/headerbg.png -docs/_static/ie6.css -docs/_static/jquery.js -docs/_static/middlebg.png -docs/_static/minus.png -docs/_static/plus.png -docs/_static/pygments.css -docs/_static/searchtools.js -docs/_static/transparent.gif -docs/_static/underscore.js 
-docs/_static/up-pressed.png -docs/_static/up.png -docs/_static/websupport.js fdb/__init__.py fdb/blr.py fdb/fbcore.py +fdb/gstat.py fdb/ibase.py +fdb/log.py fdb/monitor.py fdb/schema.py fdb/services.py +fdb/trace.py fdb/utils.py fdb.egg-info/PKG-INFO fdb.egg-info/SOURCES.txt fdb.egg-info/dependency_links.txt -fdb.egg-info/entry_points.txt -fdb.egg-info/not-zip-safe +fdb.egg-info/requires.txt fdb.egg-info/top_level.txt sphinx/Makefile sphinx/changelog.txt sphinx/conf.py -sphinx/differences-from-kdb.txt sphinx/getting-started.txt sphinx/index.txt sphinx/license.txt sphinx/python-db-api-compliance.txt sphinx/reference.txt -sphinx/requirements.txt sphinx/usage-guide.txt +sphinx/.static/basic.css +sphinx/.static/dialog-note.png +sphinx/.static/dialog-seealso.png +sphinx/.static/dialog-topic.png +sphinx/.static/dialog-warning.png +sphinx/.static/fdbtheme.css sphinx/fdbtheme/layout.html sphinx/fdbtheme/theme.conf sphinx/fdbtheme/static/dialog-note.png @@ -99,4 +52,20 @@ test/fbtest20.fdb test/fbtest21.fdb test/fbtest25.fdb +test/fbtest30.fdb +test/gstat25-a.out +test/gstat25-d.out +test/gstat25-f.out +test/gstat25-h.out +test/gstat25-i.out +test/gstat25-r.out +test/gstat25-s.out +test/gstat30-a.out +test/gstat30-d.out +test/gstat30-e.out +test/gstat30-f.out +test/gstat30-h.out +test/gstat30-i.out +test/gstat30-r.out +test/gstat30-s.out test/testfdb.py \ No newline at end of file diff -Nru fdb-1.6.1+dfsg1/.gitignore fdb-2.0.0/.gitignore --- fdb-1.6.1+dfsg1/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/.gitignore 2018-04-26 14:39:03.000000000 +0000 @@ -0,0 +1,16 @@ +# general things to ignore +build/ +dist/ +docs/ +sphinx/.build/ +*.egg-info/ +*.egg +*.py[cod] +__pycache__/ +*.so +*~ +*.wpr +*.wpu +*.log + + diff -Nru fdb-1.6.1+dfsg1/MANIFEST.in fdb-2.0.0/MANIFEST.in --- fdb-1.6.1+dfsg1/MANIFEST.in 2014-11-13 14:55:46.000000000 +0000 +++ fdb-2.0.0/MANIFEST.in 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -include LICENSE.TXT -include test/fbtest.fdb -include docs/* diff -Nru fdb-1.6.1+dfsg1/PKG-INFO fdb-2.0.0/PKG-INFO --- fdb-1.6.1+dfsg1/PKG-INFO 2016-11-30 14:10:22.000000000 +0000 +++ fdb-2.0.0/PKG-INFO 2018-04-27 11:40:46.000000000 +0000 @@ -1,19 +1,89 @@ -Metadata-Version: 1.1 +Metadata-Version: 2.1 Name: fdb -Version: 1.6.1 -Summary: Firebird RDBMS bindings for Python. +Version: 2.0.0 +Summary: The Python driver for Firebird Home-page: http://www.firebirdsql.org/en/devel-python-driver/ -Author: Pavel Cisar +Author: Pavel Císař Author-email: pcisar@users.sourceforge.net -License: BSD -Description: fdb package is a set of Firebird RDBMS bindings for python. - It works on Python 2.6+ and Python 3.x. +License: UNKNOWN +Project-URL: Funding, https://www.firebirdsql.org/en/donate/ +Project-URL: Documentation, http://fdb2.readthedocs.io/en/latest/ +Project-URL: Bug Reports, http://tracker.firebirdsql.org/browse/PYFB +Project-URL: Say Thanks!, https://saythanks.io/to/pcisar +Project-URL: Source, https://github.com/FirebirdSQL/fdb +Description: ==================================== + FDB - The Python driver for Firebird + ==================================== + |docs| || Home_ || `Bug Reports`_ || Source_ || `Say Thanks!`_ + + FDB is a `Python`_ library package that implements `Python Database API 2.0`_-compliant support for the open source relational + database `Firebird`_ ®. In addition to the minimal feature set of the standard Python DB API, FDB also exposes the entire native + (old-style) client API of the database engine. 
Notably: + + * Automatic data conversion from strings on input. + * Automatic input/output conversions of textual data between UNICODE and database character sets. + * Support for prepared SQL statements. + * Multiple independent transactions per single connection. + * All transaction parameters that Firebird supports, including table access specifications. + * Distributed transactions. + * Firebird BLOB support, including support for stream BLOBs. + * Support for Firebird Events. + * Support for Firebird ARRAY data type. + * Support for all Firebird Services. + + FDB also contains an extensive collection of submodules that simplify various Firebird-related tasks. Notably: + + * Database schema + * Firebird monitoring tables + * Parsing Firebird trace & audit logs + * Parsing Firebird server log + * Parsing Firebird gstat utility output + + FDB is implemented on top of the Firebird client library using ctypes, and currently uses only the traditional Firebird API. + + FDB works with Firebird 2.0 and newer, and Python 2.7 and 3.4+. + + FDB is free – covered by a permissive BSD-style license that both commercial and noncommercial users should find agreeable. + + FDB is a replacement for the discontinued KInterbasDB library, and as such it's designed to be as compatible + with KInterbasDB as possible, but there are some differences. See the FDB documentation for a full description + of these differences. + + |donate| + + .. _Python: http://python.org + .. _Python Database API 2.0: http://www.python.org/dev/peps/pep-0249/ + .. _Firebird: http://www.firebirdsql.org + .. _Bug Reports: http://tracker.firebirdsql.org/browse/PYFB + .. _Home: http://www.firebirdsql.org/en/devel-python-driver/ + .. _Source: https://github.com/FirebirdSQL/fdb + .. _Say Thanks!: https://saythanks.io/to/pcisar + + .. |donate| image:: https://www.firebirdsql.org/img/donate/donate_to_firebird.gif + :alt: Contribute to the development + :scale: 100% + :target: https://www.firebirdsql.org/en/donate/ + + .. |docs| image:: https://readthedocs.org/projects/fdb/badge/?version=v2.0 + :alt: Documentation Status + :scale: 100% + :target: http://fdb.readthedocs.io/en/v2.0/ Keywords: Firebird Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS Classifier: Topic :: Database +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4 +Description-Content-Type: text/x-rst diff -Nru fdb-1.6.1+dfsg1/README fdb-2.0.0/README --- fdb-1.6.1+dfsg1/README 2015-08-30 14:23:10.000000000 +0000 +++ fdb-2.0.0/README 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -FDB package is a set of Firebird RDBMS bindings for Python. It works on Python 2.6+ and Python 3.x. - -FDB is replacement for discontinued KInterbasDB library, and as such it's designed to be as much compatible -with KInterbasDB as possible, but there are some differences. See FDB documentation for full description -of these differences.
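The DB API surface described above takes only a few lines to exercise. A minimal sketch; the DSN, credentials and the queried table are placeholders, not something the package ships:

.. code-block:: python

   import fdb

   # Attach to an existing database; adjust dsn/user/password for your server.
   con = fdb.connect(dsn='localhost:/srv/db/employee.fdb',
                     user='sysdba', password='masterkey')
   cur = con.cursor()
   # The driver uses the 'qmark' paramstyle, so parameters are passed via '?'.
   cur.execute('select emp_no, full_name from employee where emp_no = ?', (2,))
   for row in cur.fetchall():
       print(row)
   con.close()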
- diff -Nru fdb-1.6.1+dfsg1/README.rst fdb-2.0.0/README.rst --- fdb-1.6.1+dfsg1/README.rst 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/README.rst 2018-04-27 11:25:31.000000000 +0000 @@ -0,0 +1,58 @@ +==================================== +FDB - The Python driver for Firebird +==================================== + +|docs| || Home_ || `Bug Reports`_ || Source_ || `Say Thanks!`_ + +FDB is a `Python`_ library package that implements `Python Database API 2.0`_-compliant support for the open source relational +database `Firebird`_ ®. In addition to the minimal feature set of the standard Python DB API, FDB also exposes the entire native +(old-style) client API of the database engine. Notably: + +* Automatic data conversion from strings on input. +* Automatic input/output conversions of textual data between UNICODE and database character sets. +* Support for prepared SQL statements. +* Multiple independent transactions per single connection. +* All transaction parameters that Firebird supports, including table access specifications. +* Distributed transactions. +* Firebird BLOB support, including support for stream BLOBs. +* Support for Firebird Events. +* Support for Firebird ARRAY data type. +* Support for all Firebird Services. + +FDB also contains an extensive collection of submodules that simplify various Firebird-related tasks. Notably: + +* Database schema +* Firebird monitoring tables +* Parsing Firebird trace & audit logs +* Parsing Firebird server log +* Parsing Firebird gstat utility output + +FDB is implemented on top of the Firebird client library using ctypes, and currently uses only the traditional Firebird API. + +FDB works with Firebird 2.0 and newer, and Python 2.7 and 3.4+. + +FDB is free – covered by a permissive BSD-style license that both commercial and noncommercial users should find agreeable. + +FDB is a replacement for the discontinued KInterbasDB library, and as such it's designed to be as compatible +with KInterbasDB as possible, but there are some differences. See the FDB documentation for a full description +of these differences. + +|donate| + +.. _Python: http://python.org +.. _Python Database API 2.0: http://www.python.org/dev/peps/pep-0249/ +.. _Firebird: http://www.firebirdsql.org +.. _Bug Reports: http://tracker.firebirdsql.org/browse/PYFB +.. _Home: http://www.firebirdsql.org/en/devel-python-driver/ +.. _Source: https://github.com/FirebirdSQL/fdb +.. _Say Thanks!: https://saythanks.io/to/pcisar + +.. |donate| image:: https://www.firebirdsql.org/img/donate/donate_to_firebird.gif + :alt: Contribute to the development + :scale: 100% + :target: https://www.firebirdsql.org/en/donate/ + +..
|docs| image:: https://readthedocs.org/projects/fdb/badge/?version=v2.0 + :alt: Documentation Status + :scale: 100% + :target: http://fdb.readthedocs.io/en/v2.0/ diff -Nru fdb-1.6.1+dfsg1/requirements.txt fdb-2.0.0/requirements.txt --- fdb-1.6.1+dfsg1/requirements.txt 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/requirements.txt 2018-04-26 16:03:20.000000000 +0000 @@ -0,0 +1,2 @@ +future>=0.16.0 +sphinx-bootstrap-theme==0.6.5 diff -Nru fdb-1.6.1+dfsg1/setup.cfg fdb-2.0.0/setup.cfg --- fdb-1.6.1+dfsg1/setup.cfg 2016-11-30 14:10:22.000000000 +0000 +++ fdb-2.0.0/setup.cfg 2018-04-27 11:40:46.000000000 +0000 @@ -7,3 +7,9 @@ source-dir = sphinx all-files = True +[metadata] +license_file = LICENSE.TXT + +[bdist_wheel] +universal = 1 + diff -Nru fdb-1.6.1+dfsg1/setup.py fdb-2.0.0/setup.py --- fdb-1.6.1+dfsg1/setup.py 2015-09-08 08:33:04.000000000 +0000 +++ fdb-2.0.0/setup.py 2018-04-27 10:13:09.000000000 +0000 @@ -1,40 +1,65 @@ -#!/usr/bin/env python -"""fdb package is a set of Firebird RDBMS bindings for python. -It works on Python 2.6+ and Python 3.x. - +#coding:utf-8 +"""A setuptools based setup module for FDB package. +See: +https://packaging.python.org/en/latest/distributing.html +https://github.com/pypa/sampleproject """ + +# Always prefer setuptools over distutils from setuptools import setup, find_packages +# To use a consistent encoding +from codecs import open +from os import path from fdb import __version__ -classifiers = [ - 'Development Status :: 5 - Production/Stable', - 'License :: OSI Approved :: BSD License', - 'Operating System :: OS Independent', - 'Programming Language :: Python', - 'Topic :: Database', -] - -setup(name='fdb', - version=__version__, - description = 'Firebird RDBMS bindings for Python.', - url='http://www.firebirdsql.org/en/devel-python-driver/', - classifiers=classifiers, - keywords=['Firebird'], - license='BSD', - author='Pavel Cisar', - author_email='pcisar@users.sourceforge.net', - long_description=__doc__, - install_requires=[], - setup_requires=[], - packages=find_packages(exclude=['ez_setup']), +here = path.abspath(path.dirname(__file__)) + +# Get the long description from the README file +with open(path.join(here, 'README.rst'), encoding='utf-8') as f: + long_description = f.read() + +# Arguments marked as "Required" below must be included for upload to PyPI. +# Fields marked as "Optional" may be commented out. 
+ +setup( + name='fdb', + version=__version__, + description='The Python driver for Firebird', + long_description=long_description, + long_description_content_type='text/x-rst', + url='http://www.firebirdsql.org/en/devel-python-driver/', + author='Pavel Císař', + author_email='pcisar@users.sourceforge.net', + classifiers=[ + 'Development Status :: 5 - Production/Stable', + + 'Intended Audience :: Developers', + + 'License :: OSI Approved :: BSD License', + + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + + 'Operating System :: POSIX :: Linux', + 'Operating System :: Microsoft :: Windows', + 'Operating System :: MacOS', + + 'Topic :: Database', +], + keywords='Firebird', # Optional + packages=find_packages(), # Required + install_requires=['future>=0.16.0'], # Optional + python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4', test_suite='nose.collector', - #include_package_data=True, - package_data={'': ['*.txt'], - 'test':'fbtest.fdb'}, - #message_extractors={'fdb': [ - #('**.py', 'python', None), - #('public/**', 'ignore', None)]}, - zip_safe=False, - entry_points=""" - """, + project_urls={ + 'Documentation': 'http://fdb2.readthedocs.io/en/latest/', + 'Bug Reports': 'http://tracker.firebirdsql.org/browse/PYFB', + 'Funding': 'https://www.firebirdsql.org/en/donate/', + 'Say Thanks!': 'https://saythanks.io/to/pcisar', + 'Source': 'https://github.com/FirebirdSQL/fdb', + }, ) diff -Nru fdb-1.6.1+dfsg1/sphinx/changelog.txt fdb-2.0.0/sphinx/changelog.txt --- fdb-1.6.1+dfsg1/sphinx/changelog.txt 2016-11-28 14:37:26.000000000 +0000 +++ fdb-2.0.0/sphinx/changelog.txt 2018-04-26 14:39:03.000000000 +0000 @@ -2,41 +2,72 @@ Changelog ######### -* `Version 1.6.1`_ (30.11.2016) -* `Version 1.6`_ (30.3.2016) -* `Version 1.5.1`_ (22.2.2016) -* `Version 1.5`_ (7.1.2016) -* `Version 1.4.11`_ (12.11.2015) -* `Version 1.4.10`_ (31.8.2015) -* `Version 1.4.9`_ (30.6.2015) -* `Version 1.4.8`_ (23.6.2015) -* `Version 1.4.7`_ (4.6.2015) -* `Version 1.4.6`_ (29.5.2015) -* `Version 1.4.5`_ (31.3.2015) -* `Version 1.4.4`_ (27.2.2015) -* `Version 1.4.3`_ (14.11.2014) -* `Version 1.4.2`_ (13.11.2014) -* `Version 1.4.1`_ (25.6.2014) -* `Version 1.4`_ (24.6.2013) -* `Version 1.3`_ (7.6.2013) -* `Version 1.2`_ (31.5.2013) -* `Version 1.1.1`_ (14.5.2013) -* `Version 1.1`_ (9.4.2013) -* `Version 1.0`_ (7.3.2013) -* `Version 0.9.9`_ (30.11.2012) -* `Version 0.9.1`_ (8. 9. 2012) -* `Version 0.9`_ (5. 9. 2012) -* `Version 0.8.5`_ (28. 6. 2012) -* `Version 0.8`_ (31. 5. 2012) -* `Version 0.7.2`_ (27. 3. 2012) -* `Version 0.7.1`_ (31. 1. 2012) -* `Version 0.7`_ (21. 12. 2011) + +Version 2.0 +=========== + +.. important:: + + This is the initial release of the new *"SweetBitter"* driver generation. + + During this (v2) generation, the FDB driver will undergo a transition from development centered around Python 2.7 / Firebird 2.x to development centered around Python 3 / Firebird 3. There are some backward incompatible changes between the v2 and v1 generations, and you may expect some between individual releases of the second generation as well. To *soften* this *bitter* pill, the second generation will have new functionality, enhancements and optimizations gradually added into each public release.
+ + **The second generation is also the last one that will directly support Python 2.7 and will be tested with Firebird 2.** + + The plan is to move forward with the v3 generation (Python 3/Firebird 3+) as soon as the v2 code base matures. + +Improvements +------------ + +- Hooks. +- New modules for parsing Firebird trace & audit logs (:mod:`fdb.trace`), gstat output (:mod:`fdb.gstat`) and server log (:mod:`fdb.log`). +- Added :class:`fdb.utils.ObjectList` class for improved object collection manipulation. +- Modules :mod:`~fdb.monitor` and :mod:`~fdb.schema` now use the new :class:`fdb.utils.ObjectList` for collections of information objects. +- Methods :meth:`fdb.Connection.database_info()` and :meth:`fdb.Transaction.transaction_info()` now distinguish between + text and binary strings with the `result_type` code. +- Significant changes to documentation. + +Other changes +------------- + +- Exception fdb.Warning removed as a duplicate of the standard Warning exception. +- Changes to make pylint happier about the fdb code. +- Parameter `result_type` in methods :meth:`fdb.Transaction.transaction_info()` and :meth:`fdb.Connection.database_info()` no longer supports the value 's' for string results, as it converted strings to unicode in Python 3, which does not make sense (it's always binary data, at least partially). Instead, a new value 'b' is introduced for binary string results. +- Reworked Visitor Pattern support in the :mod:`~fdb.schema` module, added classes :class:`fdb.utils.Visitable` and :class:`fdb.utils.Visitor`. +- Method :meth:`fdb.schema.Schema.reload` now takes numeric metadata category code(s) as parameter instead of a string name. +- Cleanup of unused constants. + +Bugs Fixed +---------- + +- (PYFB-72) - exception_from_status function gives an UnicodeDecodeError +- (PYFB-73) - Buffer filled with zero-characters is returned instead of actual content of page when page number more than 64 K +- (Unregistered) - BOOLEAN arrays were not supported +- (Unregistered) - Issues with Python 3 and Windows compatibility from latest 1.x versions. + +Version 1.8 +=========== + +- In relation to (PYFB-71_) a better memory exhaustion safeguard was implemented for materialized blobs. See :ref:`Working with BLOBs ` for details. +- Added service support for backup and restore from/to local byte stream. See :meth:`~fdb.services.Connection.local_backup()` and :meth:`~fdb.services.Connection.local_restore()` for details. +- Added attribute :attr:`fdb.schema.TableColumn.id` (RDB$FIELD_ID) +- Added method :meth:`fdb.BlobReader.get_info()`. + +Version 1.7 +=========== + +- (PYFB-66_) - Port parameter for connect and create_database is not used +- (PYFB-69_) - Can not connect to FB services if set ISC_USER & ISC_PASSWORD by os.environ[...] +- (PYFB-70_) - executemany(operation, seq_of_parameters) appears to run slower than it should +- Number of fixes to DDL generators in schema module +- Added support for :class:`~fdb.schema.Filter` and :class:`~fdb.schema.BackupHistory` in schema module. +- Added DDL scripts generator :meth:`~fdb.schema.Schema.get_metadata_ddl`. Version 1.6.1 ============= -- (PYFB-68) - Add support for isc_spb_sts_table option -- (PYFB-67) - Cursor fails after use with executemany(). ReferenceError: weakly-referenced object no longer exists +- (PYFB-68_) - Add support for isc_spb_sts_table option +- (PYFB-67_) - Cursor fails after use with executemany().
ReferenceError: weakly-referenced object no longer exists Version 1.6 =========== @@ -44,7 +75,7 @@ - New: Extended support for database and transaction info (new attributes and functions on :class:`~fdb.Connection` and :class:`~fdb.Transaction`, fixes and improvements to :meth:`~fdb.Connection.db_info` and :meth:`~fdb.Connection.database_info`). - Fix: Missing character sets for automatic translations. -- (PYFB-64) - cursor.description throws ReferenceError after executemany INSERT +- (PYFB-64_) - cursor.description throws ReferenceError after executemany INSERT Version 1.5.1 ============= @@ -67,27 +98,27 @@ - GBAK statistics (service) - On-line validation (service) -- (PYFB-60) Cursor: executemany(operation, seq_of_parameters) does PREPARE of for each parameter from +- (PYFB-60_) Cursor: executemany(operation, seq_of_parameters) does PREPARE of for each parameter from Version 1.4.11 ============== -- (PYFB-58) Severe performance loss and minor memory leak +- (PYFB-58_) Severe performance loss and minor memory leak Version 1.4.10 ============== -- (PYFB-54) Windows 7x64 and FB2.5.2x32 Python2.7: Error in Registry Path. FDB driver does not find the library fbclient.dll -- (PYFB-55) get_sql_for incorrect generate sql query for Views -- (PYFB-56) schema.reload typing mistake for views -- (PYFB-57) role.privileges does not return correct privilege list +- (PYFB-54_) Windows 7x64 and FB2.5.2x32 Python2.7: Error in Registry Path. FDB driver does not find the library fbclient.dll +- (PYFB-55_) get_sql_for incorrect generate sql query for Views +- (PYFB-56_) schema.reload typing mistake for views +- (PYFB-57_) role.privileges does not return correct privilege list Version 1.4.9 ============= -- (PYFB-51) .get_sql_for('create') returns invalid output parameters -- (PYFB-52) isc_info* types which are _DATABASE_INFO_CODES_WITH_COUNT_RESULTS raises TypeError: 'float' object cannot be interpreted as an integer +- (PYFB-51_) .get_sql_for('create') returns invalid output parameters +- (PYFB-52_) isc_info* types which are _DATABASE_INFO_CODES_WITH_COUNT_RESULTS raises TypeError: 'float' object cannot be interpreted as an integer Version 1.4.8 ============= @@ -105,7 +136,7 @@ Bugs Fixed ---------- -- (PYFB-50) Exception ReferenceError: 'weakly-referenced object no longer exists' in PreparedStatement and Cursor +- (PYFB-50_) Exception ReferenceError: 'weakly-referenced object no longer exists' in PreparedStatement and Cursor Version 1.4.5 ============= @@ -113,7 +144,7 @@ Bugs Fixed ---------- -- (PYFB-49) Memory and DB resource leak due to circular references. +- (PYFB-49_) Memory and DB resource leak due to circular references. Version 1.4.4 ============= @@ -121,12 +152,12 @@ Improvements ------------ -- (PYFB-47) Firebird client library path added as optinal parameter to :func:`fdb.connect` and :func:`fdb.create_database`. +- (PYFB-47_) Firebird client library path added as optinal parameter to :func:`fdb.connect` and :func:`fdb.create_database`. Bugs Fixed ---------- -- Additional fix related to PYFB-43 +- Additional fix related to PYFB-43_ - Additional correction for unregistered problem with circular ref. between PS and Cursor when explicit PS is executed. @@ -136,7 +167,7 @@ Bugs Fixed ---------- -- Previous fix for PYFB-43 was incomplete, corrected. +- Previous fix for PYFB-43_ was incomplete, corrected. Version 1.4.2 @@ -145,11 +176,11 @@ Improvements ------------ -- In relation to PYFB-43 I had to make a **backward incompatible change** to event processing API. 
Starting from this version +- In relation to PYFB-43_ I had to make a **backward incompatible change** to event processing API. Starting from this version + `EventConduit` does not automatically start collection of events upon creation, but it's now necessary to call the + :meth:`~fdb.EventConduit.begin` method. To mitigate the inconvenience, EventConduit now supports the context manager + protocol, which ensures calls to begin() and close() via the `with` statement (a short sketch follows below). -- In relation to PYFB-39 I have decided to drop support for implicitly cached and reused prepared statements. I never +- In relation to PYFB-39_ I have decided to drop support for implicitly cached and reused prepared statements. I never liked this feature, as I think it's a sneaky way to put some performance into badly written applications that in the worst case may lead to significant resource consumption on the server side when developers are not only lazy but also stupid. It was implemented for the sake of compatibility with KInterbasDB. @@ -159,8 +190,8 @@ Bugs Fixed ---------- -- PYFB-44 - Inserting a datetime.date into a TIMESTAMP column does not work -- PYFB-42 - Python 3.4 and FDB - backup throws an exception +- PYFB-44_ - Inserting a datetime.date into a TIMESTAMP column does not work +- PYFB-42_ - Python 3.4 and FDB - backup throws an exception - Unregistered - Fixes in monitor.TransactionInfo @@ -170,7 +201,7 @@ Improvements ------------ -- PYFB-40 - fbclient.dll is not found if not in path. +- PYFB-40_ - fbclient.dll is not found if not in path. Aside from registry lookup, client library isn't loaded until first call to :func:`fdb.connect`, :func:`fdb.create_database` or :func:`fdb.load_api` (which now supports optional specification of Firebird Client Library to load). - Adjustments for Firebird 3.0 (Alpha1) @@ -200,7 +231,7 @@ Bugs Fixed ---------- -- PYFB-37 - Unicode Strings incorrect not allowed for insertion into BLOB SubType 1. +- PYFB-37_ - Unicode Strings incorrect not allowed for insertion into BLOB SubType 1.
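The reworked event API from the 1.4.2 notes above reads naturally with the context manager. A sketch, assuming a database that posts an event named 'order_created' (the connection parameters are placeholders):

.. code-block:: python

   import fdb

   con = fdb.connect(dsn='localhost:/srv/db/test.fdb',
                     user='sysdba', password='masterkey')
   # The `with` block calls begin() on entry and close() on exit, which is
   # required now that conduits no longer start collecting automatically.
   with con.event_conduit(['order_created']) as conduit:
       counts = conduit.wait(timeout=10)  # maps event name to hit count
       print(counts)
   con.close()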
Version 1.3 @@ -279,8 +310,8 @@ Bugs Fixed ---------- -- PYFB-35 - Call to fetch after a sql statement without a result should raise exception -- PYFB-34 - Server resources not released on PreparedStatement destruction +- PYFB-35_ - Call to fetch after a sql statement without a result should raise exception +- PYFB-34_ - Server resources not released on PreparedStatement destruction Version 1.1 =========== @@ -293,7 +324,7 @@ Bugs Fixed ---------- -- PYFB-30 - BLOBs are truncated at first zero byte +- PYFB-30_ - BLOBs are truncated at first zero byte Version 1.0 =========== @@ -306,7 +337,7 @@ Bugs Fixed ---------- -- PYFB-25 - Truncate long text from VARCHAR(5000) +- PYFB-25_ - Truncate long text from VARCHAR(5000) Version 0.9.9 ============= @@ -362,7 +393,7 @@ Bugs Fixed ---------- -- Fix for PYFB-17 and PYFB-18 (see our JIRA tracker for details) +- Fix for PYFB-17_ and PYFB-18_ (see our JIRA tracker for details) - Fixes for automatic unicode conversions + refactoring - Some optimizations @@ -373,13 +404,13 @@ New Features ------------ -- Support for Firebird Event Notifications - `JIRA entry ` +- (PYFB-8_) - Support for Firebird Event Notifications Bugs Fixes ---------- -- database_info (isc_info_firebird_version) fails opn amd64 linux - `JIRA entry ` -- more than 2 consecutive cursor open execute and iter fail - `JIRA entry ` +- (PYFB-16_) - database_info (isc_info_firebird_version) fails opn amd64 linux +- (PYFB-15_) - more than 2 consecutive cursor open execute and iter fail Version 0.7.2 @@ -416,3 +447,38 @@ - TRACE service - Documentation (but you can use KInterbasDB one as FDB is as close to it as possible). - Python 3.x support (haven’t had time to test it, but it shouldn’t be hard to make it work there) + +.. _PYFB-8: http://tracker.firebirdsql.org/browse/PYFB-8 +.. _PYFB-15: http://tracker.firebirdsql.org/browse/PYFB-15 +.. _PYFB-16: http://tracker.firebirdsql.org/browse/PYFB-16 +.. _PYFB-17: http://tracker.firebirdsql.org/browse/PYFB-17 +.. _PYFB-18: http://tracker.firebirdsql.org/browse/PYFB-18 +.. _PYFB-24: http://tracker.firebirdsql.org/browse/PYFB-24 +.. _PYFB-25: http://tracker.firebirdsql.org/browse/PYFB-25 +.. _PYFB-30: http://tracker.firebirdsql.org/browse/PYFB-30 +.. _PYFB-34: http://tracker.firebirdsql.org/browse/PYFB-34 +.. _PYFB-35: http://tracker.firebirdsql.org/browse/PYFB-35 +.. _PYFB-37: http://tracker.firebirdsql.org/browse/PYFB-37 +.. _PYFB-39: http://tracker.firebirdsql.org/browse/PYFB-39 +.. _PYFB-40: http://tracker.firebirdsql.org/browse/PYFB-40 +.. _PYFB-42: http://tracker.firebirdsql.org/browse/PYFB-42 +.. _PYFB-43: http://tracker.firebirdsql.org/browse/PYFB-43 +.. _PYFB-44: http://tracker.firebirdsql.org/browse/PYFB-44 +.. _PYFB-47: http://tracker.firebirdsql.org/browse/PYFB-47 +.. _PYFB-49: http://tracker.firebirdsql.org/browse/PYFB-49 +.. _PYFB-50: http://tracker.firebirdsql.org/browse/PYFB-50 +.. _PYFB-51: http://tracker.firebirdsql.org/browse/PYFB-51 +.. _PYFB-52: http://tracker.firebirdsql.org/browse/PYFB-52 +.. _PYFB-54: http://tracker.firebirdsql.org/browse/PYFB-54 +.. _PYFB-55: http://tracker.firebirdsql.org/browse/PYFB-55 +.. _PYFB-56: http://tracker.firebirdsql.org/browse/PYFB-56 +.. _PYFB-57: http://tracker.firebirdsql.org/browse/PYFB-57 +.. _PYFB-58: http://tracker.firebirdsql.org/browse/PYFB-58 +.. _PYFB-60: http://tracker.firebirdsql.org/browse/PYFB-60 +.. _PYFB-64: http://tracker.firebirdsql.org/browse/PYFB-64 +.. _PYFB-66: http://tracker.firebirdsql.org/browse/PYFB-66 +.. _PYFB-67: http://tracker.firebirdsql.org/browse/PYFB-67 +.. 
_PYFB-68: http://tracker.firebirdsql.org/browse/PYFB-68 +.. _PYFB-69: http://tracker.firebirdsql.org/browse/PYFB-69 +.. _PYFB-70: http://tracker.firebirdsql.org/browse/PYFB-70 +.. _PYFB-71: http://tracker.firebirdsql.org/browse/PYFB-71 diff -Nru fdb-1.6.1+dfsg1/sphinx/conf.py fdb-2.0.0/sphinx/conf.py --- fdb-1.6.1+dfsg1/sphinx/conf.py 2016-11-30 13:45:26.000000000 +0000 +++ fdb-2.0.0/sphinx/conf.py 2018-04-26 14:39:03.000000000 +0000 @@ -15,6 +15,7 @@ # serve to show the default. import sys, os +import sphinx_bootstrap_theme # If your extensions are in another directory, add it here. If the directory # is relative to the documentation root, use os.path.abspath to make it @@ -42,16 +43,16 @@ # General information about the project. project = u'FDB' -copyright = u'2009-2016, David Rushby, Pavel Cisar' +copyright = u'2009-2018 Pavel Cisar' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '1.6.1' +version = '2.0' # The full version, including alpha/beta/rc tags. -release = '1.6.1' +release = '2.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -64,11 +65,11 @@ #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. -unused_docs = ['usage'] +unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. -exclude_trees = ['.build','requirements.txt'] +exclude_trees = ['.build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None @@ -90,15 +91,102 @@ # Autodoc options # --------------- autoclass_content = 'both' +autodoc_default_flags = ['members','inherited-members','undoc-members'] +autodoc_member_order = 'groupwise' # Intersphinx options -intersphinx_mapping = {'python': ('http://docs.python.org/', None)} +intersphinx_mapping = {'python': ('http://docs.python.org/2/', None)} + # Options for HTML output # ----------------------- -html_theme = "fdbtheme" -html_theme_path = ["."] +#html_theme = "fdbtheme" +#html_theme_path = ["."] + +html_theme = "bootstrap" +html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() + +# bootstrap theme config + +# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24). +# Path should be relative to the ``_static`` files directory. +#html_logo = "my_logo.png" + +# Theme options are theme-specific and customize the look and feel of a +# theme further. +html_theme_options = { + # Navigation bar title. (Default: ``project`` value) + #'navbar_title': "FDB", + + # Tab name for entire site. (Default: "Site") + 'navbar_site_name': "Content", + + # A list of tuples containing pages or urls to link to. + # Valid tuples should be in the following forms: + # (name, page) # a link to a page + # (name, "/aa/bb", 1) # a link to an arbitrary relative url + # (name, "http://example.com", True) # arbitrary absolute url + # Note the "1" or "True" value above as the third argument to indicate + # an arbitrary url. + 'navbar_links': [ + ("Usage Guide", "usage-guide"), + ("Reference", "reference"), + ("Index", "genindex"), + ], + + # Render the next and previous page links in navbar. (Default: true) + #'navbar_sidebarrel': True, + + # Render the current pages TOC in the navbar. (Default: true) + #'navbar_pagenav': True, + + # Tab name for the current pages TOC. 
(Default: "Page") + #'navbar_pagenav_name': "Page", + + # Global TOC depth for "site" navbar tab. (Default: 1) + # Switching to -1 shows all levels. + 'globaltoc_depth': 3, + + # Include hidden TOCs in Site navbar? + # + # Note: If this is "false", you cannot have mixed ``:hidden:`` and + # non-hidden ``toctree`` directives in the same page, or else the build + # will break. + # + # Values: "true" (default) or "false" + 'globaltoc_includehidden': "true", + + # HTML navbar class (Default: "navbar") to attach to <div>
element. + # For black navbar, do "navbar navbar-inverse" + 'navbar_class': "navbar navbar-inverse", + + # Fix navigation bar to top of page? + # Values: "true" (default) or "false" + 'navbar_fixed_top': "true", + + # Location of link to source. + # Options are "nav" (default), "footer" or anything else to exclude. + 'source_link_position': "none", + + # Bootswatch (http://bootswatch.com/) theme. + # + # Options are nothing (default) or the name of a valid theme + # such as "cosmo" or "sandstone". + # + # The set of valid themes depend on the version of Bootstrap + # that's used (the next config option). + # + # Currently, the supported themes are: + # - Bootstrap 2: https://bootswatch.com/2 + # - Bootstrap 3: https://bootswatch.com/3 + #'bootswatch_theme': "united", # cerulean, flatly, lumen, materia, united, yeti + 'bootswatch_theme': "cerulean", + + # Choose Bootstrap version. + # Values: "3" (default) or "2" (in quotes) + 'bootstrap_version': "2", +} # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths diff -Nru fdb-1.6.1+dfsg1/sphinx/differences-from-kdb.txt fdb-2.0.0/sphinx/differences-from-kdb.txt --- fdb-1.6.1+dfsg1/sphinx/differences-from-kdb.txt 2016-11-30 11:17:37.000000000 +0000 +++ fdb-2.0.0/sphinx/differences-from-kdb.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -============================ -Differences from KInterbasDB -============================ - -No need for initialization -========================== - -FDB doesn't support various configurations of automatic type translations like -KDB, so it's no longer necessary to initialize the driver before any feature is -used. - -Distributed transactions -======================== - -Support for :ref:`Distributed Transactions ` works slightly differently than in KDB. FDB uses :class:`~fdb.ConnectionGroup` class like KDB with the same interface, but DT is not bound to main transaction of individual connections managed by group. That means that :class:`~fdb.Cursor` instances obtained from :class:`~fdb.Connection` don't work in DT if connection is part of ConnectionGroup, but work normally in connection context. To get Cursor for specific connection that works in DT, use :meth:`fdb.ConnectionGroup.cursor()` method and pass the connection as parameter. We believe that this arrangement is more logical and flexible than KDB's way. - -Transaction context for cursor objects depends on how cursor is obtained/created: - -a) :meth:`fdb.Connection.cursor()` - Works in context of "main" transaction for connection. -b) :meth:`fdb.Transaction.cursor()` - Works in context of this transaction. -c) :meth:`fdb.ConnectionGroup.cursor()` - Works in context of Distributed Transaction - -Stream BLOBs -============ - -Firebird supports two types of BLOBs, stream and segmented. The database stores -segmented BLOBs in chunks. Each chunk starts with a two byte length indicator -followed by however many bytes of data were passed as a segment. Stream BLOBs -are stored as a continuous array of data bytes with no length indicators included. -Both types of BLOBs could be accessed by the same API functions, but only stream -BLOBs support seek operation (via `isc_seek_blob function`). - -FDB implements stream BLOBs as file-like objects. On input, you can simply pass -any file-like object (only 'read' method is required) as parameter value for BLOB -column. For example: - -.. 
code-block:: python - - f = open('filename.ext', 'rb') - cur.execute('insert into T (MyBLOB) values (?)',[f]) - f.close() - -On output, stream BLOBs are represented by BlobReader instances on request. To -request streamed access to BLOB, you have to use prepared statement for your query -and call its `set_stream_blob(column_name)` method. Stream access is not allowed -for cursors because it would lead to dangerous situations (BlobReader life-time -management) and anomalies (stream access when it's not required). Example: - -.. code-block:: python - - p = cursor.prep('select first 1 MyBLOB from T') - p.set_stream_blob('MyBLOB') - cur.execute(p) - row = cur.fetchone() - blob_reader = row[1] - print blob_reader.readlines() - blob_reader.close() - -Whenever you use stream access to BLOB, FDB opens or creates the underlying BLOB -value as stream one. On input it means that true stream BLOB is created in database, but on output it depends on how BLOB value was actually created. If BLOB was created as stream one, you can use the seek method of BlobReader, but if it was -created as regular BLOB, any call to seek will raise an error:: - - SQLCODE: -685 - - invalid ARRAY or BLOB operation - - invalid BLOB type for operation - -You can read BLOBs created as stream ones as fully materialized, and regular ones -in stream mode (without seek) without any problems, and that same apply for -input - you can create values in the same column as stream or regular ones -interchangeably. From your point of view, stream BLOBs are just different -interface to BLOB values, with single exception - `BlobReader.seek()` will throw -an exception if you'd call it on BLOB value that was not created as stream BLOB. - -To work with stream BLOBs, you don't need to use `cursor.set_type_trans_in/out` -methods like in KDB, i.e. calls to: - -.. code-block:: python - - cur.set_type_trans_in ({'BLOB': {'mode': 'stream'}}) - cur.set_type_trans_out({'BLOB': {'mode': 'stream'}}) - -To write (create) stream BLOB value, simply pass file-like object as parameter -to your INSERT/UPDATE statements where BLOB value is expected. To read BLOB -value as stream, use prepared statement and register interest to get BlobReader -instead fully materialized value via set_stream_blob() calls for each BLOB value -(column name) you want to get this way. - -:class:`~fdb.BlobReader` supports iteration protocol, and read(), readline(), readlines(), -seek(), tell(), flush() (as noop) and close() methods. It does NOT support chunks() -method of KInterbasDB.BlobReader. - -It is not strictly necessary to close BlobReader instances explicitly. A BlobReader object will be automatically closed by its __del__ method when it goes out of scope, or when its Connection, PreparedStatement closes, whichever comes first. However, it is always a better idea to close resources explicitly (via try...finally) than to rely on artifacts of the Python implementation. You will also encounter errors if BLOB value was deleted from database before BlobReader is closed, so the odds that this may happen are higher if you do not close it explicitly. - -Services API -============ - -Support for Firebird Services was :ref:`completelly reworked ` in FDB. 
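The page removed above is gone from the 2.0 documentation set, but the stream-BLOB calls it describes are still present in the driver. A Python 3 sketch of both directions, assuming a hypothetical table T with a BLOB column MyBLOB:

.. code-block:: python

   import fdb

   con = fdb.connect(dsn='localhost:/srv/db/test.fdb',
                     user='sysdba', password='masterkey')
   cur = con.cursor()

   # Input: any object with a read() method is written as a stream BLOB.
   with open('filename.ext', 'rb') as f:
       cur.execute('insert into T (MyBLOB) values (?)', [f])
   con.commit()

   # Output: register interest in a BlobReader on the prepared statement
   # instead of receiving a fully materialized value.
   p = cur.prep('select first 1 MyBLOB from T')
   p.set_stream_blob('MyBLOB')
   cur.execute(p)
   reader = cur.fetchone()[0]
   print(reader.read())
   reader.close()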
diff -Nru fdb-1.6.1+dfsg1/sphinx/getting-started.txt fdb-2.0.0/sphinx/getting-started.txt --- fdb-1.6.1+dfsg1/sphinx/getting-started.txt 2016-11-30 11:16:54.000000000 +0000 +++ fdb-2.0.0/sphinx/getting-started.txt 2018-04-26 14:39:03.000000000 +0000 @@ -1,41 +1,36 @@ -######################## -Getting Started with FDB -######################## +############### +Getting Started +############### Installation ************ -FDB is written as pure-Python module on top of Firebird client library (fbclient.so/dll) using ctypes_, so *make sure you have Firebird client properly installed before you try to install FDB*, otherwise the installation will fail. FDB supports Firebird version 2.0 and higher. +FDB is written as a pure-Python module (requires Python 2.7 or 3.4+) on top of the Firebird client library (fbclient.so/dll) using ctypes_, so **make sure you have the Firebird client properly installed before you try to install FDB**, otherwise the installation will fail. FDB supports Firebird version 2.0 and higher. -FDB is distributed as `setuptools`_ package, so you'll need setuptools or -`compatible package `_ installed to -install FDB properly. +FDB is distributed as a `setuptools`_ package, and the preferred installation method is via the pip_ tool. Installation from PYPI_ ======================= -Run easy_install or pip:: +Run pip:: $ pip install fdb -or:: - - $ easy_install fdb Installation from source ======================== Download the source tarball, uncompress it, then run the install command:: - $ curl -O http://pypi.python.org/packages/source/f/fdb/fdb-1.2.tar.gz - $ tar -xzvf fdb-1.2.tar.gz - $ cd fdb-1.2 + $ tar -xzvf fdb-2.0.tar.gz + $ cd fdb-2.0 $ python setup.py install -.. _setuptools: https://bitbucket.org/pypa/setuptools -.. _PYPI: http://pypi.python.org +.. _setuptools: https://pypi.org/project/setuptools/ +.. _PYPI: https://pypi.org/ .. _ctypes: http://docs.python.org/library/ctypes.html +.. _pip: https://pypi.org/project/pip/ Quick-start Guide diff -Nru fdb-1.6.1+dfsg1/sphinx/index.txt fdb-2.0.0/sphinx/index.txt --- fdb-1.6.1+dfsg1/sphinx/index.txt 2016-11-30 11:15:31.000000000 +0000 +++ fdb-2.0.0/sphinx/index.txt 2018-04-26 15:07:58.000000000 +0000 @@ -1,21 +1,18 @@ -.. FDB documentation master file, created by sphinx-quickstart on Wed Jan 7 12:29:48 2009. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -############################### -Welcome to FDB's documentation! -############################### - -FDB is a `Python `__ library package that -implements `Python Database API 2.0`-compliant support for the open source relational database `Firebird® `__. -In addition to the minimal feature set of the standard Python DB API, FDB also exposes nearly the entire native client API of the database engine. + +#################################### +FDB - The Python driver for Firebird +#################################### + +FDB is a `Python`_ library package that implements `Python Database API 2.0`_-compliant support for the open source relational database `Firebird`_ ®. In addition to the minimal feature set of the standard Python DB API, FDB also exposes the entire native (old-style) client API of the database engine and a number of additional extensions and enhancements for convenient use of Firebird. + +FDB is developed `under`_ the `Firebird Project`_, and is used internally as a key component of `Firebird QA `__.
FDB is free -- covered by a permissive BSD-style `license `__ that both commercial and noncommercial users should find agreeable. -This documentation set is not a tutorial on Python, SQL, or Firebird; rather, it is a topical presentation of FDB's feature set, with example code to demonstrate basic usage patterns. For detailed information about Firebird features, see the `Firebird documentation `__, and especially the excellent `The Firebird Book `__ written by Helen Borrie and published by APress. +This documentation set is not a tutorial on Python, SQL, or Firebird; rather, it is a topical presentation of FDB's feature set, with example code to demonstrate basic usage patterns. For detailed information about Firebird features, see the `Firebird documentation `__, and especially the excellent `The Firebird Book `__ written by Helen Borrie and published by IBPhoenix_. -Documentation Contents: -*********************** +Content +******* .. toctree:: :maxdepth: 2 @@ -23,7 +20,6 @@ getting-started usage-guide python-db-api-compliance - differences-from-kdb reference changelog @@ -35,5 +31,11 @@ * :ref:`genindex` * :ref:`modindex` -* :ref:`search` + +.. _IBPhoenix: http://www.ibphoenix.com +.. _Python: http://python.org +.. _Python Database API 2.0: http://www.python.org/dev/peps/pep-0249/ +.. _Firebird: http://www.firebirdsql.org +.. _under: http://www.firebirdsql.org/en/devel-python-driver/ +.. _Firebird Project: http://www.firebirdsql.org diff -Nru fdb-1.6.1+dfsg1/sphinx/python-db-api-compliance.txt fdb-2.0.0/sphinx/python-db-api-compliance.txt --- fdb-1.6.1+dfsg1/sphinx/python-db-api-compliance.txt 2014-11-13 14:55:46.000000000 +0000 +++ fdb-2.0.0/sphinx/python-db-api-compliance.txt 2018-04-26 14:39:03.000000000 +0000 @@ -1,6 +1,6 @@ -##################################### -Compliance to Python Database API 2.0 -##################################### +########################## +Compliance to PyDB API 2.0 +########################## .. currentmodule:: fdb diff -Nru fdb-1.6.1+dfsg1/sphinx/reference.txt fdb-2.0.0/sphinx/reference.txt --- fdb-1.6.1+dfsg1/sphinx/reference.txt 2016-03-29 14:50:13.000000000 +0000 +++ fdb-2.0.0/sphinx/reference.txt 2018-04-26 14:39:03.000000000 +0000 @@ -1,120 +1,394 @@ -============= +############# FDB Reference -============= +############# + +===================== +Main driver namespace +===================== .. module:: fdb :synopsis: Python Database API 2.0 Compliant driver for Firebird -Module globals -============== - -.. data:: __version__ - - Current driver version, string. - -.. data:: apilevel - - String constant stating the supported DB API level (2.0). - -.. data:: threadsafety - - Integer constant stating the level of thread safety the interface supports. - - Curretly `1` = Threads may share the module, but not connections. - -.. data:: paramstyle - - String constant stating the type of parameter marker - formatting expected by the interface. - - `'qmark'` = Question mark style, e.g. '...WHERE name=?' - -.. data:: DESCRIPTION_NAME -.. data:: DESCRIPTION_TYPE_CODE -.. data:: DESCRIPTION_DISPLAY_SIZE -.. data:: DESCRIPTION_INTERNAL_SIZE -.. data:: DESCRIPTION_PRECISION -.. data:: DESCRIPTION_SCALE -.. data:: DESCRIPTION_NULL_OK - - Helper constants for work with :attr:`Cursor.description` content. - -.. data:: ISOLATION_LEVEL_READ_COMMITED_LEGACY - - TPB for R/W WAIT READ COMMITTED NO RECORD VERSION transaction. - -.. data:: ISOLATION_LEVEL_READ_COMMITED - - TPB for R/W WAIT READ COMMITTED RECORD VERSION transaction. - -.. 
data:: ISOLATION_LEVEL_REPEATABLE_READ -.. data:: ISOLATION_LEVEL_SNAPSHOT - - TPB's for R/W WAIT SNAPSHOT transaction. - -.. data:: ISOLATION_LEVEL_SERIALIZABLE -.. data:: ISOLATION_LEVEL_SNAPSHOT_TABLE_STABILITY - - TPB's for R/W WAIT SNAPSHOT TABLE STABILITY transaction. - -.. data:: ISOLATION_LEVEL_READ_COMMITED_RO - - TPB for R/O WAIT READ COMMITTED RECORD VERSION transaction. - -.. data:: MAX_BLOB_SEGMENT_SIZE - -.. data:: charset_map - - Python dictionary that maps Firebird character set names (key) to Python character sets (value). +Constants +========= -.. data:: ODS_FB_20 -.. data:: ODS_FB_21 -.. data:: ODS_FB_25 +:__version__: Current driver version, string. - ODS version numbers introduced by Firebird engine version. -.. data:: IMPLEMENTATION_NAMES +PyDB API 2.0 globals +-------------------- -.. data:: PROVIDER_NAMES +:apilevel: String constant stating the supported DB API level (2.0). -.. data:: DB_CLASS_NAMES +:threadsafety: Integer constant stating the level of thread safety the interface supports. + Currently `1` = Threads may share the module, but not connections. + +:paramstyle: String constant stating the type of parameter marker formatting expected by the interface. `'qmark'` = Question mark style, e.g. '...WHERE name=?' + +Constants for work with driver :ref:`hooks ` +---------------------------------------------------------- + +- HOOK_API_LOADED +- HOOK_DATABASE_ATTACHED +- HOOK_DATABASE_ATTACH_REQUEST +- HOOK_DATABASE_DETACH_REQUEST +- HOOK_DATABASE_CLOSED +- HOOK_SERVICE_ATTACHED + +Helper constants for work with :attr:`Cursor.description` content +----------------------------------------------------------------- + +- DESCRIPTION_NAME +- DESCRIPTION_TYPE_CODE +- DESCRIPTION_DISPLAY_SIZE +- DESCRIPTION_INTERNAL_SIZE +- DESCRIPTION_PRECISION +- DESCRIPTION_SCALE +- DESCRIPTION_NULL_OK + +Helper Transaction Parameter Block (TPB) constants +-------------------------------------------------- + +:ISOLATION_LEVEL_READ_COMMITED_LEGACY: R/W WAIT READ COMMITTED NO RECORD VERSION transaction. +:ISOLATION_LEVEL_READ_COMMITED: R/W WAIT READ COMMITTED RECORD VERSION transaction. +:ISOLATION_LEVEL_REPEATABLE_READ: Same as ISOLATION_LEVEL_SNAPSHOT. +:ISOLATION_LEVEL_SNAPSHOT: R/W WAIT SNAPSHOT transaction. +:ISOLATION_LEVEL_SERIALIZABLE: R/W WAIT SERIALIZABLE transaction. +:ISOLATION_LEVEL_SNAPSHOT_TABLE_STABILITY: Same as ISOLATION_LEVEL_SERIALIZABLE. +:ISOLATION_LEVEL_READ_COMMITED_RO: R/O WAIT READ COMMITTED RECORD VERSION transaction. + +:MAX_BLOB_SEGMENT_SIZE: + +:charset_map: Python dictionary that maps Firebird character set names (key) to Python character sets (value). + +ODS version numbers introduced by Firebird engine version +--------------------------------------------------------- + +- ODS_FB_20 +- ODS_FB_21 +- ODS_FB_25 +- ODS_FB_30 + + +Translation dictionaries +------------------------ + +:IMPLEMENTATION_NAMES: Dictionary to map Implementation codes to names + +:PROVIDER_NAMES: Dictionary to map provider codes to names + +:DB_CLASS_NAMES: Dictionary to map database class codes to names + +Firebird API constants and globals +---------------------------------- + +..
hlist:: + :columns: 5 + + * frb_info_att_charset + * isc_dpb_activate_shadow + * isc_dpb_address_path + * isc_dpb_allocation + * isc_dpb_begin_log + * isc_dpb_buffer_length + * isc_dpb_cache_manager + * isc_dpb_cdd_pathname + * isc_dpb_connect_timeout + * isc_dpb_damaged + * isc_dpb_dbkey_scope + * isc_dpb_debug + * isc_dpb_delete_shadow + * isc_dpb_dummy_packet_interval + * isc_dpb_encrypt_key + * isc_dpb_force_write + * isc_dpb_garbage_collect + * isc_dpb_gbak_attach + * isc_dpb_gfix_attach + * isc_dpb_gsec_attach + * isc_dpb_gstat_attach + * isc_dpb_interp + * isc_dpb_lc_ctype + * isc_dpb_lc_messages + * isc_dpb_no_garbage_collect + * isc_dpb_no_reserve + * isc_dpb_num_buffers + * isc_dpb_number_of_users + * isc_dpb_old_dump_id + * isc_dpb_old_file + * isc_dpb_old_file_size + * isc_dpb_old_num_files + * isc_dpb_old_start_file + * isc_dpb_old_start_page + * isc_dpb_old_start_seqno + * isc_dpb_online + * isc_dpb_online_dump + * isc_dpb_overwrite + * isc_dpb_page_size + * isc_dpb_password + * isc_dpb_password_enc + * isc_dpb_reserved + * isc_dpb_sec_attach + * isc_dpb_set_db_charset + * isc_dpb_set_db_readonly + * isc_dpb_set_db_sql_dialect + * isc_dpb_set_page_buffers + * isc_dpb_shutdown + * isc_dpb_shutdown_delay + * isc_dpb_sql_dialect + * isc_dpb_sql_role_name + * isc_dpb_sweep + * isc_dpb_sweep_interval + * isc_dpb_sys_user_name + * isc_dpb_sys_user_name_enc + * isc_dpb_trace + * isc_dpb_user_name + * isc_dpb_verify + * isc_dpb_version1 + * isc_dpb_working_directory + * isc_dpb_no_db_triggers + * isc_dpb_nolinger, + * isc_info_active_tran_count + * isc_info_end + * isc_info_truncated + * isc_info_sql_stmt_type + * isc_info_sql_get_plan + * isc_info_sql_records + * isc_info_req_select_count + * isc_info_req_insert_count + * isc_info_req_update_count + * isc_info_req_delete_count + * isc_info_blob_total_length + * isc_info_blob_max_segment + * isc_info_blob_type + * isc_info_blob_num_segments + * fb_info_page_contents + * isc_info_active_transactions + * isc_info_allocation + * isc_info_attachment_id + * isc_info_backout_count + * isc_info_base_level + * isc_info_bpage_errors + * isc_info_creation_date + * isc_info_current_memory + * isc_info_db_class + * isc_info_db_id + * isc_info_db_provider + * isc_info_db_read_only + * isc_info_db_size_in_pages + * isc_info_db_sql_dialect + * isc_info_delete_count + * isc_info_dpage_errors + * isc_info_expunge_count + * isc_info_fetches + * isc_info_firebird_version + * isc_info_forced_writes + * isc_info_implementation + * isc_info_insert_count + * isc_info_ipage_errors + * isc_info_isc_version + * isc_info_limbo + * isc_info_marks + * isc_info_max_memory + * isc_info_next_transaction + * isc_info_no_reserve + * isc_info_num_buffers + * isc_info_ods_minor_version + * isc_info_ods_version + * isc_info_oldest_active + * isc_info_oldest_snapshot + * isc_info_oldest_transaction + * isc_info_page_errors + * isc_info_page_size + * isc_info_ppage_errors + * isc_info_purge_count + * isc_info_read_idx_count + * isc_info_read_seq_count + * isc_info_reads + * isc_info_record_errors + * isc_info_set_page_buffers + * isc_info_sql_stmt_commit + * isc_info_sql_stmt_ddl + * isc_info_sql_stmt_delete + * isc_info_sql_stmt_exec_procedure + * isc_info_sql_stmt_get_segment + * isc_info_sql_stmt_insert + * isc_info_sql_stmt_put_segment + * isc_info_sql_stmt_rollback + * isc_info_sql_stmt_savepoint + * isc_info_sql_stmt_select + * isc_info_sql_stmt_select_for_upd + * isc_info_sql_stmt_set_generator + * isc_info_sql_stmt_start_trans + * isc_info_sql_stmt_update + * 
isc_info_sweep_interval + * isc_info_tpage_errors + * isc_info_tra_access + * isc_info_tra_concurrency + * isc_info_tra_consistency + * isc_info_tra_id + * isc_info_tra_isolation + * isc_info_tra_lock_timeout + * isc_info_tra_no_rec_version + * isc_info_tra_oldest_active + * isc_info_tra_oldest_interesting + * isc_info_tra_oldest_snapshot + * isc_info_tra_read_committed + * isc_info_tra_readonly + * isc_info_tra_readwrite + * isc_info_tra_rec_version + * fb_info_tra_dbpath + * isc_info_update_count + * isc_info_user_names + * isc_info_version + * isc_info_writes + * isc_tpb_autocommit + * isc_dpb_version2 + * fb_info_implementation + * fb_info_page_warns + * fb_info_record_warns + * fb_info_bpage_warns + * fb_info_dpage_warns + * fb_info_ipage_warns + * fb_info_ppage_warns + * fb_info_tpage_warns + * fb_info_pip_errors + * fb_info_pip_warns + * isc_tpb_commit_time + * isc_tpb_concurrency + * isc_tpb_consistency + * isc_tpb_exclusive + * isc_tpb_ignore_limbo + * isc_tpb_lock_read + * isc_tpb_lock_timeout + * isc_tpb_lock_write + * isc_tpb_no_auto_undo + * isc_tpb_no_rec_version + * isc_tpb_nowait + * isc_tpb_protected + * isc_tpb_read + * isc_tpb_read_committed + * isc_tpb_rec_version + * isc_tpb_restart_requests + * isc_tpb_shared + * isc_tpb_verb_time + * isc_tpb_version3 + * isc_tpb_wait + * isc_tpb_write + * charset_map + * XSQLDA_PTR + * ISC_SHORT + * ISC_LONG + * ISC_SCHAR + * ISC_UCHAR + * ISC_QUAD + * ISC_DATE + * ISC_TIME + * SHRT_MIN + * SHRT_MAX + * USHRT_MAX + * INT_MIN + * INT_MAX + * LONG_MIN + * LONG_MAX + * SQL_TEXT + * SQL_VARYING + * SQL_SHORT + * SQL_LONG + * SQL_FLOAT + * SQL_DOUBLE + * SQL_D_FLOAT + * SQL_TIMESTAMP + * SQL_BLOB + * SQL_ARRAY + * SQL_QUAD + * SQL_TYPE_TIME + * SQL_TYPE_DATE + * SQL_INT64 + * SQL_BOOLEAN + * SUBTYPE_NUMERIC + * SUBTYPE_DECIMAL + * MAX_BLOB_SEGMENT_SIZE + * ISC_INT64 + * XSQLVAR + * ISC_TEB + * RESULT_VECTOR + * ISC_STATUS + * ISC_STATUS_ARRAY + * ISC_STATUS_PTR + * ISC_EVENT_CALLBACK + * ISC_ARRAY_DESC + * blr_varying + * blr_varying2 + * blr_text + * blr_text2 + * blr_short + * blr_long + * blr_int64 + * blr_float + * blr_d_float + * blr_double + * blr_timestamp + * blr_sql_date + * blr_sql_time + * blr_cstring + * blr_quad + * blr_blob + * blr_bool + * SQLDA_version1 + * isc_segment + * isc_db_handle + * isc_tr_handle + * isc_stmt_handle + * isc_blob_handle + * sys_encoding Exceptions ========== -.. autoexception:: Warning - :show-inheritance: - .. autoexception:: Error :show-inheritance: + :no-inherited-members: .. autoexception:: InterfaceError :show-inheritance: + :no-inherited-members: .. autoexception:: DatabaseError :show-inheritance: + :no-inherited-members: .. autoexception:: DataError :show-inheritance: + :no-inherited-members: .. autoexception:: OperationalError :show-inheritance: + :no-inherited-members: .. autoexception:: IntegrityError :show-inheritance: + :no-inherited-members: .. autoexception:: InternalError :show-inheritance: + :no-inherited-members: .. autoexception:: ProgrammingError :show-inheritance: + :no-inherited-members: .. autoexception:: NotSupportedError :show-inheritance: + :no-inherited-members: .. autoexception:: TransactionConflict :show-inheritance: + :no-inherited-members: + +.. autoexception:: ParseError + :show-inheritance: + :no-inherited-members: This is the exception inheritance layout:: @@ -122,6 +396,7 @@ |__Warning |__Error |__InterfaceError + |__ParseError |__DatabaseError |__DataError |__OperationalError @@ -133,12 +408,33 @@ Functions ========= +connect +------- + .. 
autofunction:: connect +create_database +--------------- + .. autofunction:: create_database +load_api +-------- + .. autofunction:: load_api +hook-related functions +---------------------- + +.. autofunction:: add_hook + +.. autofunction:: remove_hook + +.. autofunction:: get_hooks + +.. autofunction:: is_dead_proxy + + Classes ======= @@ -146,13 +442,13 @@ ---------- .. autoclass:: Connection - :member-order: groupwise - :members: - :inherited-members: + +ConnectionWithSchema +-------------------- .. autoclass:: ConnectionWithSchema - :member-order: groupwise - :members: + :no-members: + :no-inherited-members: :undoc-members: .. note:: @@ -163,82 +459,52 @@ ------ .. autoclass:: Cursor - :member-order: groupwise - :members: - :inherited-members: :undoc-members: Transaction ----------- .. autoclass:: Transaction - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: PreparedStatement ----------------- .. autoclass:: PreparedStatement - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: ConnectionGroup --------------- .. autoclass:: ConnectionGroup - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: TransactionContext ------------------ .. autoclass:: TransactionContext - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: EventConduit ------------ .. autoclass:: EventConduit - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: BlobReader ---------- .. autoclass:: BlobReader - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: TPB --- .. autoclass:: TPB - :member-order: groupwise - :members: - :undoc-members: TableReservation ---------------- .. autoclass:: TableReservation - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: +ParameterBuffer +--------------- + +.. autoclass:: ParameterBuffer Internally used classes exposed to driver users =============================================== @@ -246,354 +512,459 @@ .. module:: fdb.fbcore :synopsis: Implementation of Firebird driver -RowMapping ----------- +_RowMapping +----------- .. autoclass:: _RowMapping - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: EventBlock ---------- .. autoclass:: EventBlock - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: TableAccessStats ---------------- .. autoclass:: _TableAccessStats - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: .. _services_api: +======== Services ======== .. module:: fdb.services :synopsis: Submodule for work with Firebird Services -Module globals --------------- +Constants +========= -.. data:: SHUT_LEGACY +`shutdown_mode` codes for :meth:`Connection.shutdown` and :meth:`Connection.bring_online` +----------------------------------------------------------------------------------------- -.. 
data:: SHUT_NORMAL +- SHUT_LEGACY +- SHUT_NORMAL +- SHUT_MULTI +- SHUT_SINGLE +- SHUT_FULL + +`shutdown_method` codes for :meth:`Connection.shutdown` +------------------------------------------------------- + +- SHUT_FORCE +- SHUT_DENY_NEW_TRANSACTIONS +- SHUT_DENY_NEW_ATTACHMENTS + +`mode` codes for :meth:`Connection.setWriteMode` +------------------------------------------------ + +- WRITE_FORCED +- WRITE_BUFFERED + +`mode` codes for :meth:`Connection.setAccessMode` +------------------------------------------------- + +- ACCESS_READ_WRITE +- ACCESS_READ_ONLY + +:meth:`Connection.get_server_capabilities` return codes +------------------------------------------------------- + +- CAPABILITY_MULTI_CLIENT +- CAPABILITY_REMOTE_HOP +- CAPABILITY_SERVER_CONFIG +- CAPABILITY_QUOTED_FILENAME +- CAPABILITY_NO_SERVER_SHUTDOWN + +'stats' codes for :meth:`Connection.backup`/:meth:`Connection.restore` +---------------------------------------------------------------------- + +- STATS_TOTAL_TIME +- STATS_TIME_DELTA +- STATS_PAGE_READS +- STATS_PAGE_WRITES -.. data:: SHUT_MULTI -.. data:: SHUT_SINGLE +Functions +========= -.. data:: SHUT_FULL +connect +------- - These constants are to be passed as the `shutdown_mode` parameter to :meth:`Connection.shutdown` - and :meth:`Connection.bring_online`. +.. autofunction:: connect +Classes +======= -.. data:: SHUT_FORCE +Connection +---------- -.. data:: SHUT_DENY_NEW_TRANSACTIONS +.. autoclass:: Connection -.. data:: SHUT_DENY_NEW_ATTACHMENTS +User +---- - These constants are to be passed as the `shutdown_method` parameter to :meth:`Connection.shutdown` +.. autoclass:: User -.. data:: WRITE_FORCED -.. data:: WRITE_BUFFERED +=============== +Database schema +=============== - These constants are to be passed as the `mode` parameter to :meth:`Connection.setWriteMode` +.. module:: fdb.schema + :synopsis: Submodule for work with database metadata (schema) -.. data:: ACCESS_READ_WRITE +Module globals +============== -.. data:: ACCESS_READ_ONLY +Firebird field type codes +------------------------- - These constants are to be passed as the `mode` parameter to :meth:`Connection.setAccessMode` +.. hlist:: + :columns: 6 -.. data:: CAPABILITY_MULTI_CLIENT + - FBT_SMALLINT + - FBT_INTEGER + - FBT_QUAD + - FBT_FLOAT + - FBT_CHAR + - FBT_DOUBLE_PRECISION + - FBT_DATE + - FBT_VARCHAR + - FBT_CSTRING + - FBT_BLOB_ID + - FBT_BLOB + - FBT_SQL_DATE + - FBT_SQL_TIME + - FBT_SQL_TIMESTAMP + - FBT_BIGINT + - FBT_BOOLEAN -.. data:: CAPABILITY_REMOTE_HOP +Trigger masks +------------- -.. data:: CAPABILITY_SERVER_CONFIG +.. hlist:: + :columns: 6 -.. data:: CAPABILITY_QUOTED_FILENAME + - TRIGGER_TYPE_SHIFT + - TRIGGER_TYPE_MASK + - TRIGGER_TYPE_DML + - TRIGGER_TYPE_DB + - TRIGGER_TYPE_DDL -.. data:: CAPABILITY_NO_SERVER_SHUTDOWN +Trigger type codes +------------------ - These constants are return values of :meth:`Connection.get_server_capabilities` +.. hlist:: + :columns: 4 -.. 
data:: STATS_TOTAL_TIME + - DDL_TRIGGER_ANY + - DDL_TRIGGER_CREATE_TABLE + - DDL_TRIGGER_ALTER_TABLE + - DDL_TRIGGER_DROP_TABLE + - DDL_TRIGGER_CREATE_PROCEDURE + - DDL_TRIGGER_ALTER_PROCEDURE + - DDL_TRIGGER_DROP_PROCEDURE + - DDL_TRIGGER_CREATE_FUNCTION + - DDL_TRIGGER_ALTER_FUNCTION + - DDL_TRIGGER_DROP_FUNCTION + - DDL_TRIGGER_CREATE_TRIGGER + - DDL_TRIGGER_ALTER_TRIGGER + - DDL_TRIGGER_DROP_TRIGGER + - DDL_TRIGGER_CREATE_EXCEPTION + - DDL_TRIGGER_ALTER_EXCEPTION + - DDL_TRIGGER_DROP_EXCEPTION + - DDL_TRIGGER_CREATE_VIEW + - DDL_TRIGGER_ALTER_VIEW + - DDL_TRIGGER_DROP_VIEW + - DDL_TRIGGER_CREATE_DOMAIN + - DDL_TRIGGER_ALTER_DOMAIN + - DDL_TRIGGER_DROP_DOMAIN + - DDL_TRIGGER_CREATE_ROLE + - DDL_TRIGGER_ALTER_ROLE + - DDL_TRIGGER_DROP_ROLE + - DDL_TRIGGER_CREATE_INDEX + - DDL_TRIGGER_ALTER_INDEX + - DDL_TRIGGER_DROP_INDEX + - DDL_TRIGGER_CREATE_SEQUENCE + - DDL_TRIGGER_ALTER_SEQUENCE + - DDL_TRIGGER_DROP_SEQUENCE + - DDL_TRIGGER_CREATE_USER + - DDL_TRIGGER_ALTER_USER + - DDL_TRIGGER_DROP_USER + - DDL_TRIGGER_CREATE_COLLATION + - DDL_TRIGGER_DROP_COLLATION + - DDL_TRIGGER_ALTER_CHARACTER_SET + - DDL_TRIGGER_CREATE_PACKAGE + - DDL_TRIGGER_ALTER_PACKAGE + - DDL_TRIGGER_DROP_PACKAGE + - DDL_TRIGGER_CREATE_PACKAGE_BODY + - DDL_TRIGGER_DROP_PACKAGE_BODY + - DDL_TRIGGER_CREATE_MAPPING + - DDL_TRIGGER_ALTER_MAPPING + - DDL_TRIGGER_DROP_MAPPING + +Collation parameter codes +------------------------- + +- COLLATION_PAD_SPACE +- COLLATION_CASE_INSENSITIVE +- COLLATION_ACCENT_INSENSITIVE -.. data:: STATS_TIME_DELTA +Index type names ---------------- -.. data:: STATS_PAGE_READS +- INDEX_TYPE_ASCENDING +- INDEX_TYPE_DESCENDING -.. data:: STATS_PAGE_WRITES +Relation type codes ------------------- - These constants are options for :meth:`Connection.backup`/:meth:`Connection.restore` 'stats' parameter. +- RELATION_TYPE_TABLE +- RELATION_TYPE_VIEW +- RELATION_TYPE_GTT +- RELATION_TYPE_GTT_PRESERVE +- RELATION_TYPE_GTT_DELETE + +Procedure parameter type codes +------------------------------ + +- PROCPAR_DATATYPE +- PROCPAR_DOMAIN +- PROCPAR_TYPE_OF_DOMAIN +- PROCPAR_TYPE_OF_COLUMN + + +Section codes for :meth:`Schema.get_metadata_ddl` +------------------------------------------------- +
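A hedged sketch of generating DDL for selected sections only, using the codes listed just below (assumptions: :attr:`fdb.Connection.schema` exposes a bound :class:`Schema` instance, and :meth:`Schema.get_metadata_ddl` takes a ``sections`` list and returns DDL statements; DSN and credentials are placeholders)::

    import fdb
    from fdb import schema

    con = fdb.connect(dsn='localhost:employee', user='sysdba',
                      password='masterkey')
    try:
        # Emit only table and primary-key definitions, in this order.
        ddl = con.schema.get_metadata_ddl(
            sections=[schema.SCRIPT_TABLES, schema.SCRIPT_PRIMARY_KEYS])
        print('\n'.join(ddl))
    finally:
        con.close()

+.. 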
hlist:: + :columns: 4 + + - SCRIPT_COLLATIONS + - SCRIPT_CHARACTER_SETS + - SCRIPT_UDFS + - SCRIPT_GENERATORS + - SCRIPT_EXCEPTIONS + - SCRIPT_DOMAINS + - SCRIPT_PACKAGE_DEFS + - SCRIPT_FUNCTION_DEFS + - SCRIPT_PROCEDURE_DEFS + - SCRIPT_TABLES + - SCRIPT_PRIMARY_KEYS + - SCRIPT_UNIQUE_CONSTRAINTS + - SCRIPT_CHECK_CONSTRAINTS + - SCRIPT_FOREIGN_CONSTRAINTS + - SCRIPT_INDICES + - SCRIPT_VIEWS + - SCRIPT_PACKAGE_BODIES + - SCRIPT_PROCEDURE_BODIES + - SCRIPT_FUNCTION_BODIES + - SCRIPT_TRIGGERS + - SCRIPT_ROLES + - SCRIPT_GRANTS + - SCRIPT_COMMENTS + - SCRIPT_SHADOWS + - SCRIPT_SET_GENERATORS + - SCRIPT_INDEX_DEACTIVATIONS + - SCRIPT_INDEX_ACTIVATIONS + - SCRIPT_TRIGGER_DEACTIVATIONS + - SCRIPT_TRIGGER_ACTIVATIONS + +Lists and dictionary maps +------------------------- + +:COLUMN_TYPES: Dictionary map from field type codes to type names +:INTEGRAL_SUBTYPES: List of integral type names, works as a map +:INDEX_TYPES: List of index types +:BLOB_SUBTYPES: List of blob type names, works as a map +:TRIGGER_PREFIX_TYPES: List of trigger prefix type names, works as a map +:TRIGGER_SUFFIX_TYPES: List of trigger suffix type names, works as a map +:TRIGGER_DB_TYPES: List of db trigger type names, works as a map +:TRIGGER_DDL_TYPES: List of DDL trigger type names, works as a map +:RESERVED: List of reserved Firebird words +:NON_RESERVED: List of non-reserved Firebird words +:SCRIPT_DEFAULT_ORDER: List of default sections (in order) for :meth:`Schema.get_metadata_ddl` Functions ---------- - -.. autofunction:: connect +========= -Connection +get_grants ---------- -.. autoclass:: Connection - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: - -User ---- - -.. autoclass:: User - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: - -Database schema -=============== - -.. module:: fdb.schema - :synopsis: Submodule for work with database metadata (schema) +.. autofunction:: get_grants -Functions +iskeyword --------- -.. autofunction:: get_grants +.. autofunction:: iskeyword -.. autofunction:: isKeyword +escape_single_quotes +-------------------- +.. autofunction:: escape_single_quotes + +Classes ======= Schema ------ .. autoclass:: Schema - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: BaseSchemaItem -------------- .. autoclass:: BaseSchemaItem - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Collation --------- .. autoclass:: Collation - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: CharacterSet ------------ .. autoclass:: CharacterSet - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: DatabaseException ----------------- .. autoclass:: DatabaseException - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Sequence -------- .. autoclass:: Sequence - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Index ----- .. autoclass:: Index - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: TableColumn ----------- .. autoclass:: TableColumn - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: ViewColumn ---------- .. autoclass:: ViewColumn - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Domain ------ .. 
autoclass:: Domain - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Dependency ---------- .. autoclass:: Dependency - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Constraint ---------- .. autoclass:: Constraint - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Table ----- .. autoclass:: Table - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: View ---- .. autoclass:: View - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Trigger ------- .. autoclass:: Trigger - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: ProcedureParameter ------------------ .. autoclass:: ProcedureParameter - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Procedure --------- .. autoclass:: Procedure - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Role ---- .. autoclass:: Role - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: FunctionArgument ---------------- .. autoclass:: FunctionArgument - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Function -------- .. autoclass:: Function - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: DatabaseFile ------------ .. autoclass:: DatabaseFile - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Shadow ------ .. autoclass:: Shadow - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: Privilege -------------- +--------- .. autoclass:: Privilege - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: + +Package +------- + +.. autoclass:: Package + :show-inheritance: -SchemaVisitor +BackupHistory ------------- -.. autoclass:: SchemaVisitor - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: +.. autoclass:: BackupHistory + :show-inheritance: + +Filter +------ + +.. autoclass:: Filter + :show-inheritance: +====================== Monitoring information ====================== @@ -601,129 +972,360 @@ :synopsis: Submodule for work with database monitoring tables Constants ---------- +========= + +Shutdown modes for :attr:`DatabaseInfo.shutdown_mode` +----------------------------------------------------- -.. data:: SHUTDOWN_MODE_ONLINE -.. data:: SHUTDOWN_MODE_MULTI -.. data:: SHUTDOWN_MODE_SINGLE -.. data:: SHUTDOWN_MODE_FULL +- SHUTDOWN_MODE_ONLINE +- SHUTDOWN_MODE_MULTI +- SHUTDOWN_MODE_SINGLE +- SHUTDOWN_MODE_FULL - Shutdown modes for :attr:`DatabaseInfo.shutdown_mode`. +Backup states for :attr:`DatabaseInfo.backup_state` +---------------------------------------------------- -.. data:: BACKUP_STATE_NORMAL -.. data:: BACKUP_STATE_STALLED -.. data:: BACKUP_STATE_MERGE +- BACKUP_STATE_NORMAL +- BACKUP_STATE_STALLED +- BACKUP_STATE_MERGE - Backup states for :attr:`DatabaseInfo.backup_state`. +States for :attr:`AttachmentInfo.state`, :attr:`TransactionInfo.state` and :attr:`StatementInfo.state` +------------------------------------------------------------------------------------------------------ -.. data:: STATE_IDLE -.. 
data:: STATE_ACTIVE +- STATE_IDLE +- STATE_ACTIVE - States for :attr:`AttachmentInfo.state`, :attr:`TransactionInfo.state`. - and :attr:`StatementInfo.state`. +Isolation modes for :attr:`TransactionInfo.isolation_mode` +---------------------------------------------------------- -.. data:: ISOLATION_CONSISTENCY -.. data:: ISOLATION_CONCURRENCY -.. data:: ISOLATION_READ_COMMITTED_RV -.. data:: ISOLATION_READ_COMMITTED_NO_RV +- ISOLATION_CONSISTENCY +- ISOLATION_CONCURRENCY +- ISOLATION_READ_COMMITTED_RV +- ISOLATION_READ_COMMITTED_NO_RV - Isolation modes for :attr:`TransactionInfo.isolation_mode`. +Special timeout values for :attr:`TransactionInfo.lock_timeout` +--------------------------------------------------------------- -.. data:: INFINITE_WAIT -.. data:: NO_WAIT +- INFINITE_WAIT +- NO_WAIT - Special timeout values for :attr:`TransactionInfo.lock_timeout`. +Group codes for :attr:`IOStatsInfo.group` +----------------------------------------- + +- STAT_DATABASE +- STAT_ATTACHMENT +- STAT_TRANSACTION +- STAT_STATEMENT +- STAT_CALL + +Security database +----------------- -.. data:: STAT_DATABASE -.. data:: STAT_ATTACHMENT -.. data:: STAT_TRANSACTION -.. data:: STAT_STATEMENT -.. data:: STAT_CALL +- SEC_DEFAULT +- SEC_SELF +- SEC_OTHER - Group codes for :attr:`IOStatsInfo.group`. +Classes ======= Monitor ------- .. autoclass:: Monitor - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: BaseInfoItem ------------ .. autoclass:: BaseInfoItem - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: DatabaseInfo ------------ .. autoclass:: DatabaseInfo - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: AttachmentInfo -------------- .. autoclass:: AttachmentInfo - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: TransactionInfo --------------- .. autoclass:: TransactionInfo - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: StatementInfo ------------- .. autoclass:: StatementInfo - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: CallStackInfo ------------- .. autoclass:: CallStackInfo - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: IOStatsInfo ----------- .. autoclass:: IOStatsInfo - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: + +TableStatsInfo +-------------- + +.. autoclass:: TableStatsInfo + :show-inheritance: ContextVariableInfo ------------------- .. autoclass:: ContextVariableInfo - :member-order: groupwise - :members: - :inherited-members: - :undoc-members: + :show-inheritance: + + +====================== +Firebird trace & audit +====================== +.. module:: fdb.trace + :synopsis: Firebird Trace & Audit log processing + +Constants +========= + +Trace event status codes +------------------------ + +- STATUS_OK +- STATUS_FAILED +- STATUS_UNAUTHORIZED +- STATUS_UNKNOWN + +Trace event codes ----------------- + +.. note:: Also works as an index into the :data:`EVENTS` list. + +
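Before the full code list, a minimal, hedged sketch of how these events surface in practice (assumption: :class:`TraceParser`, documented further below, exposes a ``parse()`` generator over trace-log lines; the file name is a placeholder)::

    from fdb import trace

    parser = trace.TraceParser()
    with open('fbtrace.log') as log:
        for event in parser.parse(log):
            # Each event is one of the named tuples listed later,
            # corresponding to an EVENT_* code from the list below.
            print(event)

+.. 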
hlist:: + :columns: 5 + + - EVENT_TRACE_INIT + - EVENT_TRACE_SUSPEND + - EVENT_TRACE_END + - EVENT_CREATE_DATABASE + - EVENT_DROP_DATABASE + - EVENT_ATTACH + - EVENT_DETACH + - EVENT_TRANSACTION_START + - EVENT_COMMIT + - EVENT_ROLLBACK + - EVENT_COMMIT_R + - EVENT_ROLLBACK_R + - EVENT_STMT_PREPARE + - EVENT_STMT_START + - EVENT_STMT_END + - EVENT_STMT_FREE + - EVENT_STMT_CLOSE + - EVENT_TRG_START + - EVENT_TRG_END + - EVENT_PROC_START + - EVENT_PROC_END + - EVENT_SVC_START + - EVENT_SVC_ATTACH + - EVENT_SVC_DETACH + - EVENT_SVC_QUERY + - EVENT_SET_CONTEXT + - EVENT_ERROR + - EVENT_WARNING + - EVENT_SWEEP_START + - EVENT_SWEEP_PROGRESS + - EVENT_SWEEP_FINISH + - EVENT_SWEEP_FAILED + - EVENT_BLR_COMPILE + - EVENT_BLR_EXECUTE + - EVENT_DYN_EXECUTE + - EVENT_UNKNOWN + +:EVENTS: List of trace event names in order matching their numeric codes + +Classes ======= + +Named tuples for information packages ------------------------------------- + +.. autofunction:: AttachmentInfo +.. autofunction:: TransactionInfo +.. autofunction:: ServiceInfo +.. autofunction:: SQLInfo +.. autofunction:: ParamInfo +.. autofunction:: AccessTuple + +Named tuples for individual trace events ---------------------------------------- + +.. autofunction:: EventTraceInit +.. autofunction:: EventTraceSuspend +.. autofunction:: EventTraceFinish +.. autofunction:: EventCreate +.. autofunction:: EventDrop +.. autofunction:: EventAttach +.. autofunction:: EventDetach +.. autofunction:: EventTransactionStart +.. autofunction:: EventCommit +.. autofunction:: EventRollback +.. autofunction:: EventCommitRetaining +.. autofunction:: EventRollbackRetaining +.. autofunction:: EventPrepareStatement +.. autofunction:: EventStatementStart +.. autofunction:: EventStatementFinish +.. autofunction:: EventFreeStatement +.. autofunction:: EventCloseCursor +.. autofunction:: EventTriggerStart +.. autofunction:: EventTriggerFinish +.. autofunction:: EventProcedureStart +.. autofunction:: EventProcedureFinish +.. autofunction:: EventServiceAttach +.. autofunction:: EventServiceDetach +.. autofunction:: EventServiceStart +.. autofunction:: EventServiceQuery +.. autofunction:: EventSetContext +.. autofunction:: EventError +.. autofunction:: EventServiceError +.. autofunction:: EventWarning +.. autofunction:: EventServiceWarning +.. autofunction:: EventSweepStart +.. autofunction:: EventSweepProgress +.. autofunction:: EventSweepFinish +.. autofunction:: EventSweepFailed +.. autofunction:: EventBLRCompile +.. autofunction:: EventBLRExecute +.. autofunction:: EventDYNExecute +.. autofunction:: EventUnknown + +TraceParser ----------- + +.. autoclass:: TraceParser + + +=============== +GSTAT protocols +=============== + +.. module:: fdb.gstat + :synopsis: Firebird gstat output processing + +Module globals ============== + +GSTAT version ------------- +- GSTAT_25 +- GSTAT_30 + +Database attribute codes ------------------------ + +- ATTR_FORCE_WRITE +- ATTR_NO_RESERVE +- ATTR_NO_SHARED_CACHE +- ATTR_ACTIVE_SHADOW +- ATTR_SHUTDOWN_MULTI +- ATTR_SHUTDOWN_SINGLE +- ATTR_SHUTDOWN_FULL +- ATTR_READ_ONLY +- ATTR_BACKUP_LOCK +- ATTR_BACKUP_MERGE +- ATTR_BACKUP_WRONG + +.. note:: Also works as an index into ATTRIBUTES. + +:ATTRIBUTES: List with database attribute names + +Functions ========= + +empty_str --------- + +.. autofunction:: empty_str + +parse ----- + +.. autofunction:: parse + +Classes ======= + +Named tuples ------------ + +.. autofunction:: FillDistribution +.. autofunction:: Encryption +
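A hedged sketch of the intended workflow (assumptions: :func:`parse` accepts an iterable of lines captured from the ``gstat`` command-line tool, and the returned :class:`StatDatabase` described next carries a ``gstat_version`` code and decoded ``attributes``; the file name is a placeholder)::

    from fdb import gstat

    # Parse previously captured `gstat -a` output.
    with open('employee.gstat') as f:
        db = gstat.parse(f.readlines())

    print(db.gstat_version, db.attributes)

+StatDatabase ------------ + +.. 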
autoclass:: StatDatabase + :show-inheritance: + +StatTable +--------- + +.. autoclass:: StatTable + :show-inheritance: + +StatTable3 +---------- + +.. autoclass:: StatTable3 + :show-inheritance: + +StatIndex +--------- + +.. autoclass:: StatIndex + :show-inheritance: + +StatIndex3 +---------- + +.. autoclass:: StatIndex3 + :show-inheritance: + +=================== +Firebird server log +=================== + +.. module:: fdb.log + :synopsis: Firebird server log processing + +Functions +========= + +parse +----- + +.. autofunction:: parse + +Classes +======= + +Named tuples +------------ + +.. autofunction:: LogEntry + + +========= Utilities ========= @@ -732,56 +1334,1505 @@ Functions ---------- +========= .. autofunction:: update_meta .. autofunction:: iter_class_properties .. autofunction:: iter_class_variables .. autofunction:: embed_attributes +Classes +======= + LateBindingProperty ------------------- .. autoclass:: LateBindingProperty - :member-order: groupwise - :members: + :show-inheritance: + :no-inherited-members: Iterator -------- .. autoclass:: Iterator - :member-order: groupwise :members: __iter__, next, __next__ - :inherited-members: - :undoc-members: EmbeddedProperty ---------------- .. autoclass:: EmbeddedProperty - :member-order: groupwise - :members: + :show-inheritance: + :no-inherited-members: EmbeddedAttribute ----------------- .. autoclass:: EmbeddedAttribute - :member-order: groupwise - :members: + :show-inheritance: + :no-inherited-members: + +ObjectList +---------- + +.. autoclass:: ObjectList + :show-inheritance: + :no-inherited-members: + +Visitable +--------- + +.. autoclass:: Visitable + +Visitor +------- + +.. autoclass:: Visitor -.. module:: fdb.blr - :synopsis: Python ctypes interface to Firebird client library (BLR) .. module:: fdb.ibase :synopsis: Python ctypes interface to Firebird client library +=========================================== ctypes interface to Firebird client library =========================================== :ref:`ctypes ` interface to `fbclient.so/dll` is defined in submodules :mod:`fdb.ibase` and :mod:`fdb.blr`. + +The :mod:`fdb.ibase` module is the main interface to the Firebird API. The :mod:`fdb.blr` module contains only constants related to Firebird's low-level Binary Language Representation (BLR). + +Constants +========= + +C integer limit constants +------------------------- + +.. hlist:: + :columns: 6 + + - SHRT_MIN + - SHRT_MAX + - USHRT_MAX + - INT_MIN + - INT_MAX + - UINT_MAX + - LONG_MIN + - LONG_MAX + - SSIZE_T_MIN + - SSIZE_T_MAX + +Type codes +---------- + +.. hlist:: + :columns: 6 + + - SQL_TEXT + - SQL_VARYING + - SQL_SHORT + - SQL_LONG + - SQL_FLOAT + - SQL_DOUBLE + - SQL_D_FLOAT + - SQL_TIMESTAMP + - SQL_BLOB + - SQL_ARRAY + - SQL_QUAD + - SQL_TYPE_TIME + - SQL_TYPE_DATE + - SQL_INT64 + - SQL_BOOLEAN + - SQL_NULL + - SUBTYPE_NUMERIC + - SUBTYPE_DECIMAL + +Internal type codes (for example used by ARRAY descriptor) +---------------------------------------------------------- + +.. hlist:: + :columns: 6 + + - blr_text + - blr_text2 + - blr_short + - blr_long + - blr_quad + - blr_float + - blr_double + - blr_d_float + - blr_timestamp + - blr_varying + - blr_varying2 + - blr_blob + - blr_cstring + - blr_cstring2 + - blr_blob_id + - blr_sql_date + - blr_sql_time + - blr_int64 + - blr_blob2 + - blr_domain_name + - blr_domain_name2 + - blr_not_nullable + - blr_column_name + - blr_column_name2 + - blr_bool + + +Database parameter block stuff +------------------------------ + +.. 
hlist:: + :columns: 5 + + - isc_dpb_version1 + - isc_dpb_version2 + - isc_dpb_cdd_pathname + - isc_dpb_allocation + - isc_dpb_page_size + - isc_dpb_num_buffers + - isc_dpb_buffer_length + - isc_dpb_debug + - isc_dpb_garbage_collect + - isc_dpb_verify + - isc_dpb_sweep + - isc_dpb_dbkey_scope + - isc_dpb_number_of_users + - isc_dpb_trace + - isc_dpb_no_garbage_collect + - isc_dpb_damaged + - isc_dpb_sys_user_name + - isc_dpb_encrypt_key + - isc_dpb_activate_shadow + - isc_dpb_sweep_interval + - isc_dpb_delete_shadow + - isc_dpb_force_write + - isc_dpb_begin_log + - isc_dpb_quit_log + - isc_dpb_no_reserve + - isc_dpb_user_name + - isc_dpb_password + - isc_dpb_password_enc + - isc_dpb_sys_user_name_enc + - isc_dpb_interp + - isc_dpb_online_dump + - isc_dpb_old_file_size + - isc_dpb_old_num_files + - isc_dpb_old_file + - isc_dpb_old_start_page + - isc_dpb_old_start_seqno + - isc_dpb_old_start_file + - isc_dpb_old_dump_id + - isc_dpb_lc_messages + - isc_dpb_lc_ctype + - isc_dpb_cache_manager + - isc_dpb_shutdown + - isc_dpb_online + - isc_dpb_shutdown_delay + - isc_dpb_reserved + - isc_dpb_overwrite + - isc_dpb_sec_attach + - isc_dpb_connect_timeout + - isc_dpb_dummy_packet_interval + - isc_dpb_gbak_attach + - isc_dpb_sql_role_name + - isc_dpb_set_page_buffers + - isc_dpb_working_directory + - isc_dpb_sql_dialect + - isc_dpb_set_db_readonly + - isc_dpb_set_db_sql_dialect + - isc_dpb_gfix_attach + - isc_dpb_gstat_attach + - isc_dpb_set_db_charset + - isc_dpb_gsec_attach + - isc_dpb_address_path + - isc_dpb_process_id + - isc_dpb_no_db_triggers + - isc_dpb_trusted_auth + - isc_dpb_process_name + - isc_dpb_trusted_role + - isc_dpb_org_filename + - isc_dpb_utf8_filename + - isc_dpb_ext_call_depth + - isc_dpb_auth_block + - isc_dpb_remote_protocol + - isc_dpb_client_version + - isc_dpb_host_name + - isc_dpb_os_user + - isc_dpb_specific_auth_data + - isc_dpb_auth_plugin_list + - isc_dpb_auth_plugin_name + - isc_dpb_config + - isc_dpb_nolinger + - isc_dpb_reset_icu + - isc_dpb_map_attach + +Info structural codes +--------------------- + +.. hlist:: + :columns: 6 + + - isc_info_end + - isc_info_truncated + - isc_info_error + - isc_info_data_not_ready + - isc_info_length + - isc_info_flag_end + + + +DB Info item codes +------------------ + +:isc_info_db_id: [db_filename,site_name[,site_name...]] +:isc_info_reads: number of page reads +:isc_info_writes: number of page writes +:isc_info_fetches: number of reads from the memory buffer cache +:isc_info_marks: number of writes to the memory buffer cache +:isc_info_implementation: (implementation code, implementation class) +:isc_info_isc_version: interbase server version identification string +:isc_info_base_level: capability version of the server +:isc_info_page_size: database page size +:isc_info_num_buffers: number of memory buffers currently allocated +:isc_info_limbo: limbo transactions +:isc_info_current_memory: amount of server memory (in bytes) currently in use +:isc_info_max_memory: maximum amount of memory (in bytes) used at one time since the first process attached to the database +:isc_info_allocation: number of last database page allocated +:isc_info_attachment_id: attachment id number + +all `*_count` codes below return {[table_id]=operation_count,...}; table IDs are in the system table RDB$RELATIONS. 
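For example (a hedged sketch: DSN and credentials are placeholders, and it assumes the ``isc_info_*`` codes are re-exported at package level, otherwise import them from :mod:`fdb.ibase`; scalar items go through :meth:`Connection.database_info`, per-table counters through :meth:`Connection.db_info`)::

    import fdb

    con = fdb.connect(dsn='localhost:employee', user='sysdba',
                      password='masterkey')
    # Scalar item: 'i' requests an integer result.
    page_size = con.database_info(fdb.isc_info_page_size, 'i')
    # A *_count item comes back as a {table_id: count} mapping.
    seq_reads = con.db_info(fdb.isc_info_read_seq_count)
    print(page_size, seq_reads)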
+ +:isc_info_read_seq_count: number of sequential table scans (row reads) done on each table since the database was last attached +:isc_info_read_idx_count: number of reads done via an index since the database was last attached +:isc_info_insert_count: number of inserts into the database since the database was last attached +:isc_info_update_count: number of database updates since the database was last attached +:isc_info_delete_count: number of database deletes since the database was last attached +:isc_info_backout_count: number of removals of a version of a record +:isc_info_purge_count: number of removals of old versions of fully mature records (records that are committed, so that older ancestor versions are no longer needed) +:isc_info_expunge_count: number of removals of a record and all of its ancestors, for records whose deletions have been committed + +:isc_info_sweep_interval: number of transactions that are committed between sweeps to remove database record versions that are no longer needed +:isc_info_ods_version: On-disk structure (ODS) major version number +:isc_info_ods_minor_version: On-disk structure (ODS) minor version number +:isc_info_no_reserve: 20% page space reservation for holding backup versions of modified records: 0=yes, 1=no +:isc_info_forced_writes: mode in which database writes are performed: 0=sync, 1=async +:isc_info_user_names: array of names of all the users currently attached to the database +:isc_info_page_errors: number of page level errors found by validation +:isc_info_record_errors: number of record level errors found by validation +:isc_info_bpage_errors: number of blob page errors found by validation +:isc_info_dpage_errors: number of data page errors found by validation +:isc_info_ipage_errors: number of index page errors found by validation +:isc_info_ppage_errors: number of pointer page errors found by validation +:isc_info_tpage_errors: number of transaction page errors found by validation +:isc_info_set_page_buffers: number of memory buffers that should be allocated +:isc_info_db_sql_dialect: dialect of currently attached database +:isc_info_db_read_only: whether the database is read-only (1) or not (0) +:isc_info_db_size_in_pages: number of allocated pages +:frb_info_att_charset: charset of current attachment +:isc_info_db_class: server architecture +:isc_info_firebird_version: Firebird server version identification string +:isc_info_oldest_transaction: ID of oldest transaction +:isc_info_oldest_active: ID of oldest active transaction +:isc_info_oldest_snapshot: ID of oldest snapshot transaction +:isc_info_next_transaction: ID of next transaction +:isc_info_db_provider: for Firebird this is 'isc_info_db_code_firebird' +:isc_info_active_transactions: array of active transaction IDs +:isc_info_active_tran_count: number of active transactions +:isc_info_creation_date: time_t struct representing database creation date & time +:isc_info_db_file_size: added in FB 2.1, nbackup-related - size (in pages) of locked db +:fb_info_page_contents: added in FB 2.5, get raw page contents; takes page_number as parameter +:fb_info_implementation: (cpu code, OS code, compiler code, flags, implementation class) +:fb_info_page_warns: number of page level warnings found by validation +:fb_info_record_warns: number of record level warnings found by validation +:fb_info_bpage_warns: number of blob page level warnings found by validation +:fb_info_dpage_warns: number of data page level warnings found by validation +:fb_info_ipage_warns: number of index page level warnings found by validation +:fb_info_ppage_warns: number of 
pointer page level warnings found by validation +:fb_info_tpage_warns: number of transaction page level warnings found by validation +:fb_info_pip_errors: number of pip page level errors found by validation +:fb_info_pip_warns: number of pip page level warnings found by validation + +:isc_info_version: = isc_info_isc_version + +Blob information items ---------------------- + +.. hlist:: + :columns: 4 + + - isc_info_blob_num_segments + - isc_info_blob_max_segment + - isc_info_blob_total_length + - isc_info_blob_type + +Transaction information items ----------------------------- + +.. hlist:: + :columns: 4 + + - isc_info_tra_id + - isc_info_tra_oldest_interesting + - isc_info_tra_oldest_snapshot + - isc_info_tra_oldest_active + - isc_info_tra_isolation + - isc_info_tra_access + - isc_info_tra_lock_timeout + - fb_info_tra_dbpath + + +**isc_info_tra_isolation responses:** + +- isc_info_tra_consistency +- isc_info_tra_concurrency +- isc_info_tra_read_committed + +**isc_info_tra_read_committed options:** + +- isc_info_tra_no_rec_version +- isc_info_tra_rec_version + +**isc_info_tra_access responses:** + +- isc_info_tra_readonly +- isc_info_tra_readwrite + +SQL information items --------------------- + +.. hlist:: + :columns: 4 + + - isc_info_sql_select + - isc_info_sql_bind + - isc_info_sql_num_variables + - isc_info_sql_describe_vars + - isc_info_sql_describe_end + - isc_info_sql_sqlda_seq + - isc_info_sql_message_seq + - isc_info_sql_type + - isc_info_sql_sub_type + - isc_info_sql_scale + - isc_info_sql_length + - isc_info_sql_null_ind + - isc_info_sql_field + - isc_info_sql_relation + - isc_info_sql_owner + - isc_info_sql_alias + - isc_info_sql_sqlda_start + - isc_info_sql_stmt_type + - isc_info_sql_get_plan + - isc_info_sql_records + - isc_info_sql_batch_fetch + - isc_info_sql_relation_alias + - isc_info_sql_explain_plan + - isc_info_sql_stmt_flags + +SQL information return values ----------------------------- + +.. hlist:: + :columns: 4 + + - isc_info_sql_stmt_select + - isc_info_sql_stmt_insert + - isc_info_sql_stmt_update + - isc_info_sql_stmt_delete + - isc_info_sql_stmt_ddl + - isc_info_sql_stmt_get_segment + - isc_info_sql_stmt_put_segment + - isc_info_sql_stmt_exec_procedure + - isc_info_sql_stmt_start_trans + - isc_info_sql_stmt_commit + - isc_info_sql_stmt_rollback + - isc_info_sql_stmt_select_for_upd + - isc_info_sql_stmt_set_generator + - isc_info_sql_stmt_savepoint + +Transaction parameter block stuff --------------------------------- + +.. hlist:: + :columns: 6 + + - isc_tpb_version1 + - isc_tpb_version3 + - isc_tpb_consistency + - isc_tpb_concurrency + - isc_tpb_shared + - isc_tpb_protected + - isc_tpb_exclusive + - isc_tpb_wait + - isc_tpb_nowait + - isc_tpb_read + - isc_tpb_write + - isc_tpb_lock_read + - isc_tpb_lock_write + - isc_tpb_verb_time + - isc_tpb_commit_time + - isc_tpb_ignore_limbo + - isc_tpb_read_committed + - isc_tpb_autocommit + - isc_tpb_rec_version + - isc_tpb_no_rec_version + - isc_tpb_restart_requests + - isc_tpb_no_auto_undo + - isc_tpb_lock_timeout + +BLOB parameter buffer --------------------- + +.. hlist:: + :columns: 6 + + - isc_bpb_version1 + - isc_bpb_source_type + - isc_bpb_target_type + - isc_bpb_type + - isc_bpb_source_interp + - isc_bpb_target_interp + - isc_bpb_filter_parameter + - isc_bpb_storage + - isc_bpb_type_segmented + - isc_bpb_type_stream + - isc_bpb_storage_main + - isc_bpb_storage_temp + +Service parameter block stuff ----------------------------- + +.. 
hlist:: + :columns: 6 + + - isc_spb_current_version + - isc_spb_version + - isc_spb_version3 + - isc_spb_user_name + - isc_spb_sys_user_name + - isc_spb_sys_user_name_enc + - isc_spb_password + - isc_spb_password_enc + - isc_spb_command_line + - isc_spb_dbname + - isc_spb_verbose + - isc_spb_options + - isc_spb_address_path + - isc_spb_process_id + - isc_spb_trusted_auth + - isc_spb_process_name + - isc_spb_trusted_role + - isc_spb_verbint + - isc_spb_auth_block + - isc_spb_auth_plugin_name + - isc_spb_auth_plugin_list + - isc_spb_utf8_filename + - isc_spb_client_version + - isc_spb_remote_protocol + - isc_spb_host_name + - isc_spb_os_user + - isc_spb_config + - isc_spb_expected_db + +**Parameters for isc_action_{add|del|mod|disp}_user:** + +.. hlist:: + :columns: 6 + + - isc_spb_sec_userid + - isc_spb_sec_groupid + - isc_spb_sec_username + - isc_spb_sec_password + - isc_spb_sec_groupname + - isc_spb_sec_firstname + - isc_spb_sec_middlename + - isc_spb_sec_lastname + - isc_spb_sec_admin + +**Parameters for isc_action_svc_backup:** + +.. hlist:: + :columns: 4 + + - isc_spb_bkp_file + - isc_spb_bkp_factor + - isc_spb_bkp_length + - isc_spb_bkp_skip_data + - isc_spb_bkp_stat + - isc_spb_bkp_ignore_checksums + - isc_spb_bkp_ignore_limbo + - isc_spb_bkp_metadata_only + - isc_spb_bkp_no_garbage_collect + - isc_spb_bkp_old_descriptions + - isc_spb_bkp_non_transportable + - isc_spb_bkp_convert + - isc_spb_bkp_expand + - isc_spb_bkp_no_triggers + +**Parameters for isc_action_svc_properties:** + +.. hlist:: + :columns: 4 + + - isc_spb_prp_page_buffers + - isc_spb_prp_sweep_interval + - isc_spb_prp_shutdown_db + - isc_spb_prp_deny_new_attachments + - isc_spb_prp_deny_new_transactions + - isc_spb_prp_reserve_space + - isc_spb_prp_write_mode + - isc_spb_prp_access_mode + - isc_spb_prp_set_sql_dialect + - isc_spb_prp_activate + - isc_spb_prp_db_online + - isc_spb_prp_nolinger + - isc_spb_prp_force_shutdown + - isc_spb_prp_attachments_shutdown + - isc_spb_prp_transactions_shutdown + - isc_spb_prp_shutdown_mode + - isc_spb_prp_online_mode + +**Parameters for isc_spb_prp_shutdown_mode and isc_spb_prp_online_mode:** + +.. hlist:: + :columns: 4 + + - isc_spb_prp_sm_normal + - isc_spb_prp_sm_multi + - isc_spb_prp_sm_single + - isc_spb_prp_sm_full + +**Parameters for isc_spb_prp_reserve_space:** + +.. hlist:: + + - isc_spb_prp_res_use_full + - isc_spb_prp_res + +**Parameters for isc_spb_prp_write_mode:** + +.. hlist:: + + - isc_spb_prp_wm_async + - isc_spb_prp_wm_sync + +**Parameters for isc_action_svc_validate:** + +.. hlist:: + :columns: 5 + + - isc_spb_val_tab_incl + - isc_spb_val_tab_excl + - isc_spb_val_idx_incl + - isc_spb_val_idx_excl + - isc_spb_val_lock_timeout + +**Parameters for isc_action_svc_repair:** + +.. 
hlist:: + :columns: 5 + + - isc_spb_rpr_commit_trans + - isc_spb_rpr_rollback_trans + - isc_spb_rpr_recover_two_phase + - isc_spb_tra_id + - isc_spb_single_tra_id + - isc_spb_multi_tra_id + - isc_spb_tra_state + - isc_spb_tra_state_limbo + - isc_spb_tra_state_commit + - isc_spb_tra_state_rollback + - isc_spb_tra_state_unknown + - isc_spb_tra_host_site + - isc_spb_tra_remote_site + - isc_spb_tra_db_path + - isc_spb_tra_advise + - isc_spb_tra_advise_commit + - isc_spb_tra_advise_rollback + - isc_spb_tra_advise_unknown + - isc_spb_tra_id_64 + - isc_spb_single_tra_id_64 + - isc_spb_multi_tra_id_64 + - isc_spb_rpr_commit_trans_64 + - isc_spb_rpr_rollback_trans_64 + - isc_spb_rpr_recover_two_phase_64 + + - isc_spb_rpr_validate_db + - isc_spb_rpr_sweep_db + - isc_spb_rpr_mend_db + - isc_spb_rpr_list_limbo_trans + - isc_spb_rpr_check_db + - isc_spb_rpr_ignore_checksum + - isc_spb_rpr_kill_shadows + - isc_spb_rpr_full + - isc_spb_rpr_icu + +**Parameters for isc_action_svc_restore:** + +.. hlist:: + :columns: 4 + + - isc_spb_res_skip_data + - isc_spb_res_buffers + - isc_spb_res_page_size + - isc_spb_res_length + - isc_spb_res_access_mode + - isc_spb_res_fix_fss_data + - isc_spb_res_fix_fss_metadata + - isc_spb_res_stat + - isc_spb_res_metadata_only + - isc_spb_res_deactivate_idx + - isc_spb_res_no_shadow + - isc_spb_res_no_validity + - isc_spb_res_one_at_a_time + - isc_spb_res_replace + - isc_spb_res_create + - isc_spb_res_use_all_space + +**Parameters for isc_spb_res_access_mode:** + +.. hlist:: + + - isc_spb_res_am_readonly + - isc_spb_res_am_readwrite + +**Parameters for isc_info_svc_svr_db_info:** + +.. hlist:: + + - isc_spb_num_att + - isc_spb_num_db + +**Parameters for isc_info_svc_db_stats:** + +.. hlist:: + :columns: 4 + + - isc_spb_sts_data_pages + - isc_spb_sts_db_log + - isc_spb_sts_hdr_pages + - isc_spb_sts_idx_pages + - isc_spb_sts_sys_relations + - isc_spb_sts_record_versions + - isc_spb_sts_table + - isc_spb_sts_nocreation + - isc_spb_sts_encryption + +**Parameters for isc_action_svc_nbak:** + +.. hlist:: + :columns: 4 + + - isc_spb_nbk_level + - isc_spb_nbk_file + - isc_spb_nbk_direct + - isc_spb_nbk_no_triggers + +**Parameters for trace:** + +.. hlist:: + + - isc_spb_trc_id + - isc_spb_trc_name + - isc_spb_trc_cfg + + +Service action items +-------------------- + +.. hlist:: + :columns: 5 + + - isc_action_svc_backup + - isc_action_svc_restore + - isc_action_svc_repair + - isc_action_svc_add_user + - isc_action_svc_delete_user + - isc_action_svc_modify_user + - isc_action_svc_display_user + - isc_action_svc_properties + - isc_action_svc_add_license + - isc_action_svc_remove_license + - isc_action_svc_db_stats + - isc_action_svc_get_ib_log + - isc_action_svc_get_fb_log + - isc_action_svc_nbak + - isc_action_svc_nrest + - isc_action_svc_trace_start + - isc_action_svc_trace_stop + - isc_action_svc_trace_suspend + - isc_action_svc_trace_resume + - isc_action_svc_trace_list + - isc_action_svc_set_mapping + - isc_action_svc_drop_mapping + - isc_action_svc_display_user_adm + - isc_action_svc_validate + +Service information items +------------------------- + +.. 
hlist:: + :columns: 4 + + - isc_info_svc_svr_db_info + - isc_info_svc_get_config + - isc_info_svc_version + - isc_info_svc_server_version + - isc_info_svc_implementation + - isc_info_svc_capabilities + - isc_info_svc_user_dbpath + - isc_info_svc_get_env + - isc_info_svc_get_env_lock + - isc_info_svc_get_env_msg + - isc_info_svc_line + - isc_info_svc_to_eof + - isc_info_svc_timeout + - isc_info_svc_limbo_trans + - isc_info_svc_running + - isc_info_svc_get_users + - isc_info_svc_auth_block + - isc_info_svc_stdin + +BLOB action codes +----------------- + +.. hlist:: + :columns: 5 + + - blb_got_eof + - blb_got_fragment + - blb_got_full_segment + - blb_seek_relative + - blb_seek_from_tail + +Implementation codes +-------------------- + +.. hlist:: + :columns: 4 + + - isc_info_db_impl_rdb_vms + - isc_info_db_impl_rdb_eln + - isc_info_db_impl_rdb_eln_dev + - isc_info_db_impl_rdb_vms_y + - isc_info_db_impl_rdb_eln_y + - isc_info_db_impl_jri + - isc_info_db_impl_jsv + - isc_info_db_impl_isc_apl_68K + - isc_info_db_impl_isc_vax_ultr + - isc_info_db_impl_isc_vms + - isc_info_db_impl_isc_sun_68k + - isc_info_db_impl_isc_os2 + - isc_info_db_impl_isc_sun4 + - isc_info_db_impl_isc_hp_ux + - isc_info_db_impl_isc_sun_386i + - isc_info_db_impl_isc_vms_orcl + - isc_info_db_impl_isc_mac_aux + - isc_info_db_impl_isc_rt_aix + - isc_info_db_impl_isc_mips_ult + - isc_info_db_impl_isc_xenix + - isc_info_db_impl_isc_dg + - isc_info_db_impl_isc_hp_mpexl + - isc_info_db_impl_isc_hp_ux68K + - isc_info_db_impl_isc_sgi + - isc_info_db_impl_isc_sco_unix + - isc_info_db_impl_isc_cray + - isc_info_db_impl_isc_imp + - isc_info_db_impl_isc_delta + - isc_info_db_impl_isc_next + - isc_info_db_impl_isc_dos + - isc_info_db_impl_m88K + - isc_info_db_impl_unixware + - isc_info_db_impl_isc_winnt_x86 + - isc_info_db_impl_isc_epson + - isc_info_db_impl_alpha_osf + - isc_info_db_impl_alpha_vms + - isc_info_db_impl_netware_386 + - isc_info_db_impl_win_only + - isc_info_db_impl_ncr_3000 + - isc_info_db_impl_winnt_ppc + - isc_info_db_impl_dg_x86 + - isc_info_db_impl_sco_ev + - isc_info_db_impl_i386 + - isc_info_db_impl_freebsd + - isc_info_db_impl_netbsd + - isc_info_db_impl_darwin_ppc + - isc_info_db_impl_sinixz + - isc_info_db_impl_linux_sparc + - isc_info_db_impl_linux_amd64 + - isc_info_db_impl_freebsd_amd64 + - isc_info_db_impl_winnt_amd64 + - isc_info_db_impl_linux_ppc + - isc_info_db_impl_darwin_x86 + - isc_info_db_impl_linux_mipsel + - isc_info_db_impl_linux_mips + - isc_info_db_impl_darwin_x64 + - isc_info_db_impl_sun_amd64 + - isc_info_db_impl_linux_arm + - isc_info_db_impl_linux_ia64 + - isc_info_db_impl_darwin_ppc64 + - isc_info_db_impl_linux_s390x + - isc_info_db_impl_linux_s390 + - isc_info_db_impl_linux_sh + - isc_info_db_impl_linux_sheb + - isc_info_db_impl_linux_hppa + - isc_info_db_impl_linux_alpha + - isc_info_db_impl_linux_arm64 + - isc_info_db_impl_linux_ppc64el + - isc_info_db_impl_linux_ppc64 + +Info DB provider codes +---------------------- + +.. hlist:: + :columns: 4 + + - isc_info_db_code_rdb_eln + - isc_info_db_code_rdb_vms + - isc_info_db_code_interbase + - isc_info_db_code_firebird + +Info DB class codes +------------------- + +.. 
hlist:: + :columns: 4 + + - isc_info_db_class_access + - isc_info_db_class_y_valve + - isc_info_db_class_rem_int + - isc_info_db_class_rem_srvr + - isc_info_db_class_pipe_int + - isc_info_db_class_pipe_srvr + - isc_info_db_class_sam_int + - isc_info_db_class_sam_srvr + - isc_info_db_class_gateway + - isc_info_db_class_cache + - isc_info_db_class_classic_access + - isc_info_db_class_server_access + +Request information items +------------------------- + +.. hlist:: + :columns: 4 + + - isc_info_number_messages + - isc_info_max_message + - isc_info_max_send + - isc_info_max_receive + - isc_info_state + - isc_info_message_number + - isc_info_message_size + - isc_info_request_cost + - isc_info_access_path + - isc_info_req_select_count + - isc_info_req_insert_count + - isc_info_req_update_count + - isc_info_req_delete_count + +Access path items +----------------- + +.. hlist:: + :columns: 5 + + - isc_info_rsb_end + - isc_info_rsb_begin + - isc_info_rsb_type + - isc_info_rsb_relation + - isc_info_rsb_plan + +Record Source Block (RSB) types +------------------------------- + +.. hlist:: + :columns: 5 + + - isc_info_rsb_unknown + - isc_info_rsb_indexed + - isc_info_rsb_navigate + - isc_info_rsb_sequential + - isc_info_rsb_cross + - isc_info_rsb_sort + - isc_info_rsb_first + - isc_info_rsb_boolean + - isc_info_rsb_union + - isc_info_rsb_aggregate + - isc_info_rsb_merge + - isc_info_rsb_ext_sequential + - isc_info_rsb_ext_indexed + - isc_info_rsb_ext_dbkey + - isc_info_rsb_left_cross + - isc_info_rsb_select + - isc_info_rsb_sql_join + - isc_info_rsb_simulate + - isc_info_rsb_sim_cross + - isc_info_rsb_once + - isc_info_rsb_procedure + - isc_info_rsb_skip + - isc_info_rsb_virt_sequential + - isc_info_rsb_recursive + - isc_info_rsb_window + - isc_info_rsb_singular + - isc_info_rsb_writelock + - isc_info_rsb_buffer + - isc_info_rsb_hash + +RSB Bitmap expressions +---------------------- + +.. hlist:: + :columns: 4 + + - isc_info_rsb_and + - isc_info_rsb_or + - isc_info_rsb_dbkey + - isc_info_rsb_index + +Info request response codes +--------------------------- + +.. hlist:: + :columns: 4 + + - isc_info_req_active + - isc_info_req_inactive + - isc_info_req_send + - isc_info_req_receive + - isc_info_req_select + - isc_info_req_sql_stall + +Blob Subtypes +------------- + +.. hlist:: + :columns: 6 + + - isc_blob_untyped + - isc_blob_text + - isc_blob_blr + - isc_blob_acl + - isc_blob_ranges + - isc_blob_summary + - isc_blob_format + - isc_blob_tra + - isc_blob_extfile + - isc_blob_debug_info + - isc_blob_max_predefined_subtype + +Cancel types for fb_cancel_operation +------------------------------------ + +.. hlist:: + :columns: 4 + + - fb_cancel_disable + - fb_cancel_enable + - fb_cancel_raise + - fb_cancel_abort + +Other constants +--------------- + +- DSQL_close +- DSQL_drop +- DSQL_unprepare +- SQLDA_version1 + +- isc_info_req_select_count +- isc_info_req_insert_count +- isc_info_req_update_count +- isc_info_req_delete_count + +flags set in fb_info_crypt_state: + +- fb_info_crypt_encrypted +- fb_info_crypt_process + +:FB_API_VER: (int) Firebird API version number +:MAX_BLOB_SEGMENT_SIZE: (int) Max size for BLOB segment + +Types +===== + +Basic types +----------- + +.. autoclass:: STRING + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: WSTRING + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: FB_API_HANDLE + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_STATUS + :no-show-inheritance: + :no-inherited-members: + +.. 
autoclass:: ISC_STATUS_PTR + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_STATUS_ARRAY + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: FB_SQLSTATE_STRING + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_LONG + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_ULONG + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_SHORT + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_USHORT + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_UCHAR + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_SCHAR + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_INT64 + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_UINT64 + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_DATE + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_TIME + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_TIMESTAMP + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: GDS_QUAD_t + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: GDS_QUAD + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_QUAD + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_ARRAY_BOUND + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_ARRAY_DESC + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: ISC_BLOB_DESC + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: isc_blob_ctl + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: bstream + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: BSTREAM + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: FB_BLOB_STREAM + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: paramdsc + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: paramvary + :no-show-inheritance: + :no-inherited-members: + +ISC_TEB +------- + +.. autoclass:: ISC_TEB + :no-show-inheritance: + :no-inherited-members: + +XSQLVAR +------- + +.. autoclass:: XSQLVAR + :no-show-inheritance: + :no-inherited-members: + +XSQLDA +------ + +.. autoclass:: XSQLDA + :no-show-inheritance: + :no-inherited-members: + +.. autoclass:: XSQLDA_PTR + :no-show-inheritance: + :no-inherited-members: + +USER_SEC_DATA +------------- + +.. autoclass:: USER_SEC_DATA + :no-show-inheritance: + :no-inherited-members: + +RESULT_VECTOR +------------- + +.. autoclass:: RESULT_VECTOR + :no-show-inheritance: + :no-inherited-members: + +Callbacks +--------- + +.. class:: FB_SHUTDOWN_CALLBACK + + ctypes.CFUNCTYPE(UNCHECKED(c_int), c_int, c_int, POINTER(None)) + +.. class:: ISC_CALLBACK + + ctypes.CFUNCTYPE(None) + +.. class:: ISC_PRINT_CALLBACK + + ctypes.CFUNCTYPE(None, c_void_p, c_short, STRING) + +.. class:: ISC_VERSION_CALLBACK + + ctypes.CFUNCTYPE(None, c_void_p, STRING) + +.. class:: ISC_EVENT_CALLBACK + + ctypes.CFUNCTYPE(None, POINTER(ISC_UCHAR), c_ushort, POINTER(ISC_UCHAR)) + +.. autoclass:: blobcallback + :no-show-inheritance: + :no-inherited-members: + + +Other globals +============= + +:charset_map: Dictionary that maps DB CHAR SET NAME to PYTHON CODEC NAME (CANONICAL) + +.. data:: ISC_TRUE +.. data:: ISC_FALSE + +Functions +========= + + +Classes +======= + +fbclient_API +------------ + .. autoclass:: fbclient_API - :member-order: groupwise - :members: + :members: + +.. 
module:: fdb.blr + :synopsis: Python ctypes interface to Firebird client library (BLR) + +BLR Constants +============= + +.. note:: BLR data types are defined in :mod:`fdb.ibase` + +Main BLR codes +-------------- + +.. hlist:: + :columns: 7 + + - blr_inner + - blr_left + - blr_right + - blr_full + - blr_gds_code + - blr_sql_code + - blr_exception + - blr_trigger_code + - blr_default_code + - blr_raise + - blr_exception_msg + - blr_exception_params + - blr_sql_state + - blr_version4 + - blr_version5 + - blr_eoc + - blr_end + - blr_assignment + - blr_begin + - blr_dcl_variable + - blr_message + - blr_erase + - blr_fetch + - blr_for + - blr_if + - blr_loop + - blr_modify + - blr_handler + - blr_receive + - blr_select + - blr_send + - blr_store + - blr_label + - blr_leave + - blr_store2 + - blr_post + - blr_literal + - blr_dbkey + - blr_field + - blr_fid + - blr_parameter + - blr_variable + - blr_average + - blr_count + - blr_maximum + - blr_minimum + - blr_total + - blr_add + - blr_subtract + - blr_multiply + - blr_divide + - blr_negate + - blr_concatenate + - blr_substring + - blr_parameter2 + - blr_from + - blr_via + - blr_parameter2_old + - blr_user_name + - blr_null + - blr_equiv + - blr_eql + - blr_neq + - blr_gtr + - blr_geq + - blr_lss + - blr_leq + - blr_containing + - blr_matching + - blr_starting + - blr_between + - blr_or + - blr_and + - blr_not + - blr_any + - blr_missing + - blr_unique + - blr_like + - blr_rse + - blr_first + - blr_project + - blr_sort + - blr_boolean + - blr_ascending + - blr_descending + - blr_relation + - blr_rid + - blr_union + - blr_map + - blr_group_by + - blr_aggregate + - blr_join_type + - blr_agg_count + - blr_agg_max + - blr_agg_min + - blr_agg_total + - blr_agg_average + - blr_parameter3 + - blr_agg_count2 + - blr_agg_count_distinct + - blr_agg_total_distinct + - blr_agg_average_distinct + - blr_function + - blr_gen_id + - blr_prot_mask + - blr_upcase + - blr_lock_state + - blr_value_if + - blr_matching2 + - blr_index + - blr_ansi_like + - blr_scrollable + - blr_run_count + - blr_rs_stream + - blr_exec_proc + - blr_procedure + - blr_pid + - blr_exec_pid + - blr_singular + - blr_abort + - blr_block + - blr_error_handler + - blr_cast + - blr_pid2 + - blr_procedure2 + - blr_start_savepoint + - blr_end_savepoint + +Other BLR codes +--------------- +.. 
hlist:: + :columns: 6 + + - blr_domain_type_of + - blr_domain_full + - blr_date + - blr_plan + - blr_merge + - blr_join + - blr_sequential + - blr_navigational + - blr_indices + - blr_retrieve + - blr_relation2 + - blr_rid2 + - blr_set_generator + - blr_ansi_any + - blr_exists + - blr_record_version + - blr_stall + - blr_ansi_all + - blr_extract + - blr_continue + - blr_forward + - blr_backward + - blr_bof_forward + - blr_eof_backward + - blr_extract_year + - blr_extract_month + - blr_extract_day + - blr_extract_hour + - blr_extract_minute + - blr_extract_second + - blr_extract_weekday + - blr_extract_yearday + - blr_extract_millisecond + - blr_extract_week + - blr_current_date + - blr_current_timestamp + - blr_current_time + - blr_post_arg + - blr_exec_into + - blr_user_savepoint + - blr_dcl_cursor + - blr_cursor_stmt + - blr_current_timestamp2 + - blr_current_time2 + - blr_agg_list + - blr_agg_list_distinct + - blr_modify2 + - blr_current_role + - blr_skip + - blr_exec_sql + - blr_internal_info + - blr_nullsfirst + - blr_writelock + - blr_nullslast + - blr_lowcase + - blr_strlen + - blr_strlen_bit + - blr_strlen_char + - blr_strlen_octet + - blr_trim + - blr_trim_both + - blr_trim_leading + - blr_trim_trailing + - blr_trim_spaces + - blr_trim_characters + - blr_savepoint_set + - blr_savepoint_release + - blr_savepoint_undo + - blr_savepoint_release_single + - blr_cursor_open + - blr_cursor_close + - blr_cursor_fetch + - blr_cursor_fetch_scroll + - blr_croll_forward + - blr_croll_backward + - blr_croll_bof + - blr_croll_eof + - blr_croll_absolute + - blr_croll_relative + - blr_init_variable + - blr_recurse + - blr_sys_function + - blr_auto_trans + - blr_similar + - blr_exec_stmt + - blr_exec_stmt_inputs + - blr_exec_stmt_outputs + - blr_exec_stmt_sql + - blr_exec_stmt_proc_block + - blr_exec_stmt_data_src + - blr_exec_stmt_user + - blr_exec_stmt_pwd + - blr_exec_stmt_tran + - blr_exec_stmt_tran_clone + - blr_exec_stmt_privs + - blr_exec_stmt_in_params + - blr_exec_stmt_in_params2 + - blr_exec_stmt_out_params + - blr_exec_stmt_role + - blr_stmt_expr + - blr_derived_expr + - blr_procedure3 + - blr_exec_proc2 + - blr_function2 + - blr_window + - blr_partition_by + - blr_continue_loop + - blr_procedure4 + - blr_agg_function + - blr_substring_similar + - blr_bool_as_value + - blr_coalesce + - blr_decode + - blr_exec_subproc + - blr_subproc_decl + - blr_subproc + - blr_subfunc_decl + - blr_subfunc + - blr_record_version2 + - blr_gen_id2 diff -Nru fdb-1.6.1+dfsg1/sphinx/requirements.txt fdb-2.0.0/sphinx/requirements.txt --- fdb-1.6.1+dfsg1/sphinx/requirements.txt 2014-11-13 14:55:46.000000000 +0000 +++ fdb-2.0.0/sphinx/requirements.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -fdb diff -Nru fdb-1.6.1+dfsg1/sphinx/.static/basic.css fdb-2.0.0/sphinx/.static/basic.css --- fdb-1.6.1+dfsg1/sphinx/.static/basic.css 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/sphinx/.static/basic.css 2018-04-26 15:06:02.000000000 +0000 @@ -0,0 +1,790 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 450px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + 
-ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + 
list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +dl { + margin-bottom: 15px; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; + border-style: none; + padding: 0; +} + +code.descclassname { + background-color: transparent; + border-style: none; + padding: 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; + border-style: none; + padding: 0; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: relative; + left: 0px; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} + +/* -- My additions 
---------------------------------------------------------- */ + +div.note { + color: black; + border: 2px solid #7a9eec; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; + background: #e1ecfe url(dialog-note.png) no-repeat 10px 8px; +} + +div.danger { + color: black; + border: 2px solid #fbc2c4; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; + background: #fbe3e4 url(dialog-note.png) no-repeat 10px 8px; +} + +div.attention { + color: black; + border: 2px solid #ffd324; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; + background: #fff6bf url(dialog-note.png) no-repeat 10px 8px; +} + +div.caution { + color: black; + border: 2px solid #ffd324; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; + background: #fff6bf url(dialog-warning.png) no-repeat 10px 8px; +} + +div.important { + color: black; + background: #fbe3e4 url(dialog-seealso.png) no-repeat 10px 8px; + border: 2px solid #fbc2c4; + border-left-style: none; + border-right-style: none; + padding: 10px 20px 10px 60px; +} + +div.seealso { + color: black; + background: #fff6bf url(dialog-seealso.png) no-repeat 10px 8px; + border: 2px solid #ffd324; + border-left-style: none; + border-right-style: none; + padding: 10px 20px 10px 60px; +} + +div.hint, div.tip { + color: black; + background: #eeffcc url(dialog-topic.png) no-repeat 10px 8px; + border: 2px solid #aacc99; + border-left-style: none; + border-right-style: none; + padding: 10px 20px 10px 60px; +} + +div.warning, div.error { + color: black; + background: #fbe3e4 url(dialog-warning.png) no-repeat 10px 8px; + border: 2px solid #fbc2c4; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; +} + +p { + text-align: justify; + padding-bottom: 5px; +} + +h1 { + background: #fff6bf; + border: 2px solid #ffd324; + border-left-style: none; + border-right-style: none; + padding: 10px 10px 10px 10px; + text-align: center; +} + +h2 { + /* background: #eeffcc; */ + border: 2px solid #aacc99; + border-left-style: none; + border-right-style: none; + border-top-style: none; + padding: 10px 0px 0px 0px; + /* text-align: center; */ +} + +h3 { + /* background: #eeffcc; */ + border: 1px solid #7a9eec; + border-left-style: none; + border-right-style: none; + border-top-style: none; + padding: 0; + /* text-align: center; */ +} + +h4 { + background: #eeffcc; + /* border: 1px solid #aacc99; */ + border-left-style: none; + border-right-style: none; + border-top-style: none; + padding: 5px 5px 5px 5px; + /* text-align: center; */ +} + Binary files /tmp/tmpCHbpDN/3oqXNv4kpv/fdb-1.6.1+dfsg1/sphinx/.static/dialog-note.png and /tmp/tmpCHbpDN/gi8KZtmtUO/fdb-2.0.0/sphinx/.static/dialog-note.png differ Binary files /tmp/tmpCHbpDN/3oqXNv4kpv/fdb-1.6.1+dfsg1/sphinx/.static/dialog-seealso.png and /tmp/tmpCHbpDN/gi8KZtmtUO/fdb-2.0.0/sphinx/.static/dialog-seealso.png differ Binary files /tmp/tmpCHbpDN/3oqXNv4kpv/fdb-1.6.1+dfsg1/sphinx/.static/dialog-topic.png and /tmp/tmpCHbpDN/gi8KZtmtUO/fdb-2.0.0/sphinx/.static/dialog-topic.png differ Binary files /tmp/tmpCHbpDN/3oqXNv4kpv/fdb-1.6.1+dfsg1/sphinx/.static/dialog-warning.png and /tmp/tmpCHbpDN/gi8KZtmtUO/fdb-2.0.0/sphinx/.static/dialog-warning.png differ diff -Nru fdb-1.6.1+dfsg1/sphinx/.static/fdbtheme.css fdb-2.0.0/sphinx/.static/fdbtheme.css --- fdb-1.6.1+dfsg1/sphinx/.static/fdbtheme.css 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/sphinx/.static/fdbtheme.css 2018-04-26 
15:06:02.000000000 +0000 @@ -0,0 +1,395 @@ +/* + * pylons.css_t + * ~~~~~~~~~~~~ + * + * Sphinx stylesheet -- pylons theme. + * + * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: "Nobile", sans-serif; + font-size: 100%; + background-color: #393939; + color: #ffffff; + margin: 0; + padding: 0; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 {{ theme_sidebarwidth }}px; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.document { + background-color: #eee; +} + +div.header { + width:100%; + background: #f4ad32 url(headerbg.png) repeat-x 0 top; + border-bottom: 2px solid #ffffff; +} + +div.logo { + text-align: center; + padding-top: 10px; +} + +div.body { + background-color: #ffffff; + color: #3E4349; + padding: 0 30px 30px 30px; + font-size: 1em; + border: 2px solid #ddd; + border-right-style: none; + overflow: auto; +} + +div.footer { + color: #ffffff; + width: 100%; + padding: 13px 0; + text-align: center; + font-size: 75%; + background: transparent; + clear:both; +} + +div.footer a { + color: #ffffff; + text-decoration: none; +} + +div.footer a:hover { + color: #e88f00; + text-decoration: underline; +} + +div.related { + line-height: 30px; + color: #373839; + font-size: 0.8em; + background-color: #eee; +} + +div.related a { + color: #1b61d6; +} + +div.related ul { + padding-left: {{ theme_sidebarwidth|toint + 10 }}px; +} + +div.sphinxsidebar { + font-size: 0.75em; + line-height: 1.5em; +} + +div.sphinxsidebarwrapper{ + padding: 10px 0; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: "Neuton", sans-serif; + color: #373839; + font-size: 1.4em; + font-weight: normal; + margin: 0; + padding: 5px 10px; + border-bottom: 2px solid #ddd; +} + +div.sphinxsidebar h4{ + font-size: 1.3em; +} + +div.sphinxsidebar h3 a { + color: #000000; +} + + +div.sphinxsidebar p { + color: #888; + padding: 5px 20px; +} + +div.sphinxsidebar p.topless { +} + +div.sphinxsidebar ul { + margin: 10px 20px; + padding: 0; + color: #373839; +} + +div.sphinxsidebar a { + color: #444; +} + +div.sphinxsidebar input { + border: 1px solid #ccc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar input[type=text]{ + margin-left: 20px; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 2px solid #c6d880; + background-color: #e6efc2; + width: 40%; + float: right; + border-right-style: none; + border-left-style: none; + padding: 10px 20px; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- body styles ----------------------------------------------------------- */ + +a, a .pre { + color: #1b61d6; + text-decoration: none; +} + +a:hover, a:hover .pre { + text-decoration: underline; +} + +/* + margin: 20px -20px 10px -20px; + headfont Trebuchet MS +*/ + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: {{ theme_headfont }}; + background-color: {{ theme_headbgcolor }}; + font-weight: normal; + color: {{ theme_headtextcolor }}; + border-bottom: 1px solid #ccc; + margin: 20px -25px 10px -25px; + padding: 3px 0 3px 10px; +} + +div.body h1 { margin-top: 0; font-size: 200%; } +div.body h2 { font-size: 160%; } +div.body h3 { font-size: 140%; } +div.body h4 { font-size: 120%; } +div.body h5 { font-size: 110%; } 
+div.body h6 { font-size: 100%; } + +/* Pyramid style + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: "Neuton", sans-serif; + background-color: #ffffff; + font-weight: normal; + color: #373839; + margin: 30px 0px 10px 0px; + padding: 5px 0; +} + +div.body h1 { border-top: 20px solid white; margin-top: 0; font-size: 200%; } +div.body h2 { font-size: 150%; background-color: #ffffff; } +div.body h3 { font-size: 120%; background-color: #ffffff; } +div.body h4 { font-size: 110%; background-color: #ffffff; } +div.body h5 { font-size: 100%; background-color: #ffffff; } +div.body h6 { font-size: 100%; background-color: #ffffff; } +*/ + +a.headerlink { + color: #1b61d6; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + text-decoration: underline; +} + +div.body p, div.body dd, div.body li { + line-height: 1.5em; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.highlight{ + background-color: white; +} + +div.note { + border: 2px solid #7a9eec; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; + background: #e1ecfe url(dialog-note.png) no-repeat 10px 8px; +} + +div.danger { + border: 2px solid #fbc2c4; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; + background: #fbe3e4 url(dialog-note.png) no-repeat 10px 8px; +} + +div.attention { + border: 2px solid #ffd324; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; + background: #fff6bf url(dialog-note.png) no-repeat 10px 8px; +} + +div.caution { + border: 2px solid #ffd324; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; + background: #fff6bf url(dialog-warning.png) no-repeat 10px 8px; +} + +div.important { + background: #fbe3e4 url(dialog-seealso.png) no-repeat 10px 8px; + border: 2px solid #fbc2c4; + border-left-style: none; + border-right-style: none; + padding: 10px 20px 10px 60px; +} + +div.seealso { + background: #fff6bf url(dialog-seealso.png) no-repeat 10px 8px; + border: 2px solid #ffd324; + border-left-style: none; + border-right-style: none; + padding: 10px 20px 10px 60px; +} + +div.hint, div.tip { + background: #eeffcc url(dialog-topic.png) no-repeat 10px 8px; + border: 2px solid #aacc99; + border-left-style: none; + border-right-style: none; + padding: 10px 20px 10px 60px; +} + +div.topic { + background: #eeeeee; + border: 2px solid #C6C9CB; + padding: 10px 20px; + border-right-style: none; + border-left-style: none; +} + +div.warning, div.error { + background: #fbe3e4 url(dialog-warning.png) no-repeat 10px 8px; + border: 2px solid #fbc2c4; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; +} + +p.admonition-title { + display: none; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 10px; + background-color: #fafafa; +/* background-color: #eeffcc; */ + color: #222; + line-height: 1.2em; + border: 2px solid #C6C9CB; +/* border: 2px solid #aacc99; */ + font-size: 1.1em; + margin: 1.5em 0 1.5em 0; + border-right-style: none; + border-left-style: none; +} + +tt { + background-color: transparent; + color: #222; + font-size: 1.1em; + font-family: monospace; +} + +.viewcode-back { + font-family: "Nobile", sans-serif; +} + +div.viewcode-block:target { + background-color: #fff6bf; + border: 2px solid #ffd324; + border-left-style: none; + border-right-style: none; + padding: 10px 20px; +} + +table.highlighttable { + 
width: 100%; +} + +table.highlighttable td { + padding: 0; +} + +a em.std-term { + color: #007f00; +} + +a:hover em.std-term { + text-decoration: underline; +} + +.download { + font-family: "Nobile", sans-serif; + font-weight: normal; + font-style: normal; +} + +tt.xref { + font-weight: normal; + font-style: normal; +} diff -Nru fdb-1.6.1+dfsg1/sphinx/usage-guide.txt fdb-2.0.0/sphinx/usage-guide.txt --- fdb-1.6.1+dfsg1/sphinx/usage-guide.txt 2016-11-30 11:14:58.000000000 +0000 +++ fdb-2.0.0/sphinx/usage-guide.txt 2018-04-26 14:39:03.000000000 +0000 @@ -14,10 +14,14 @@ * :mod:`~fdb.fbcore` - Main driver source code. * :mod:`~fdb.services` - Driver code to work with Firebird Services. * :mod:`~fdb.schema` - Driver code to work with Firebird database schema (metadata). +* :mod:`~fdb.monitor` - Driver code to work with Firebird monitoring tables. +* :mod:`~fdb.trace` - Code for Firebird Trace & Audit processing. +* :mod:`~fdb.gstat` - Code for Firebird gstat output processing. +* :mod:`~fdb.log` - Code for Firebird server log processing. * :mod:`~fdb.utils` - Various classes and functions used by driver that are generally useful. +* :mod:`~fdb.blr` - Firebird BLR-related definitions. -All important data, functions, classes and constants are available directly in fdb namespace, so there is not need to import or use :mod:`~fdb.fbcore` and :mod:`~fdb.ibase` submodules directly. Exception is the :mod:`fdb.services` submodule that contains functions and classes for work with Firebird Services. Because Services are optional, not so frequently used Firebird facility, all -service-related code was isolated in separate submodule rather than exposed directly through main module namespace. Because :mod:`~fdb.services` submodule contains names also used by main driver (:func:`~fdb.services.connect`, :class:`~fdb.services.Connection`), it's advised to use fully qualified names when refering to them instead importing them via `from fdb.services import ...`. +All important data, functions, classes and constants are available directly in the fdb namespace, so there is no need to import or use the :mod:`~fdb.fbcore` and :mod:`~fdb.ibase` submodules directly. Other submodules (like the :mod:`fdb.services` submodule that contains functions and classes for work with Firebird Services) contain optional driver functionality that is not exposed directly through the main module namespace. Because the :mod:`~fdb.services` submodule contains names also used by the main driver (:func:`~fdb.services.connect`, :class:`~fdb.services.Connection`), it's advised to use fully qualified names when referring to them instead of importing them via `from fdb.services import ...`. .. index:: Database @@ -178,6 +182,9 @@ FDB also provides convenient constants for supported engine versions: `ODS_FB_20`, `ODS_FB_21` and `ODS_FB_25`. +.. index:: + pair: Database; On-disk Structure + Database On-Disk Structure -------------------------- @@ -407,7 +414,6 @@ * You don't need to handle conversions from Python data types to strings. * FDB will handle all data type conversions (if necessary) from Python data types to Firebird ones, including `None/NULL` conversion and conversion from `unicode` to `byte strings` in the encoding expected by the server. * You may pass BLOB values as open `file-like` objects, and FDB will handle the transfer of the BLOB value. -* If you'll pass exactly the same command `string` again to particular :class:`Cursor` instance, it will be executed more efficiently (see section about `Prepared Statements`_ for details).
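For instance, a minimal sketch of a parametrized INSERT (the connection parameters and the `people` table with columns `id`, `name` and `photo` are illustrative, not part of the sample database)::

    import fdb

    con = fdb.connect(dsn='localhost:employee', user='sysdba', password='masterkey')
    cur = con.cursor()
    # FDB converts the Python values as needed: None becomes NULL,
    # and the open file is transferred as the BLOB value.
    cur.execute("insert into people (id, name, photo) values (?, ?, ?)",
                (1, 'Philip Gilbert', open('photo.png', 'rb')))
    con.commit()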
Parametrized statements also have some limitations. Currently: @@ -739,7 +745,7 @@ The database engine treats most SQL data types in a weakly typed fashion: the engine may attempt to convert the raw value to a different type, as appropriate for the current context. For instance, the SQL expressions `123` (integer) and `‘123’` (string) are treated equivalently when the value is to be inserted into an `integer` field; the same applies when `‘123’` and `123` are to be inserted into a `varchar` field. -This weak typing model is quite unlike Python’s dynamic yet strong typing. Although weak typing is regarded with suspicion by most experienced Python programmers, the database engine is in certain situations so aggressive about its typing model that KInterbasDB must compromise in order to remain an elegant means of programming the database engine. +This weak typing model is quite unlike Python’s dynamic yet strong typing. Although weak typing is regarded with suspicion by most experienced Python programmers, the database engine is in certain situations so aggressive about its typing model that FDB must compromise in order to remain an elegant means of programming the database engine. An example is the handling of “magic values” for date and time fields. The database engine interprets certain string values such as `‘yesterday’` and `‘now’` as having special meaning in a date/time context. If FDB did not accept strings as the values of parameters destined for storage in date/time fields, the resulting code would be awkward. Consider the difference between the two Python snippets below, which insert a row containing an integer and a timestamp into a table defined with the following DDL statement: @@ -811,6 +817,8 @@ .. index:: BLOB pair: Data; BLOB + pair: BLOB; materialized + pair: BLOB; stream .. _working_with_blobs: @@ -832,6 +840,25 @@ * For **input** values, simply use :ref:`parametrized statement ` and pass any `file-like` object in place of the BLOB parameter. The `file-like` object must implement only the :meth:`~file.read` method, as no other method is used. * For **output** values, you have to call the :meth:`Cursor.set_stream_blob` (or :meth:`PreparedStatement.set_stream_blob`) method with a specification of the column name(s) that should be returned as `file-like` objects. FDB then returns a :class:`BlobReader` instance instead of a string in place of the returned BLOB value for these column(s). +.. important:: + + **Before FDB version 1.8** load of a materialized blob with multiple segments (i.e. larger than 64K) failed with an error (SQLCODE: 101 - segment buffer length shorter than expected). This was an artefact of backward + compatibility with KInterbasDB that prevented very large materialized blobs + from exhausting your memory. + + **Since FDB version 1.8** this memory exhaustion safeguard was enhanced in + a more convenient (but backward incompatible) way. New methods + :meth:`PreparedStatement.set_stream_blob_treshold()` and :meth:`Cursor.set_stream_blob_treshold()` were introduced to + control the maximum size of materialized blobs. When a particular blob value exceeds this threshold, an instance of :class:`BlobReader` is + returned instead of a string value, so your application has to be prepared to handle BLOBs in both incarnations. + + A zero value effectively forces all blobs to be returned as stream blobs. + A negative value means no size limit for materialized blobs (use at your + own risk). **The default threshold value is 64K.** + + The blob size threshold has effect only on materialized blob columns, i.e. columns not explicitly requested to be returned as streamed ones using :meth:`PreparedStatement.set_stream_blob` (columns requested that way are always returned as stream blobs).
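A minimal sketch of stream BLOB output (the `people` table and `PHOTO` column are again illustrative)::

    cur = con.cursor()
    # Ask for PHOTO to be returned as a BlobReader instead of a string.
    cur.set_stream_blob('PHOTO')
    cur.execute("select id, photo from people")
    for person_id, photo in cur:
        header = photo.read(1024)  # file-like access to the BLOB value
        photo.close()              # close explicitly, as explained below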
+ + The :class:`BlobReader` instance is bound to a particular BLOB value returned by the server, so its lifetime is limited. The actual BLOB value is not opened initially, so no additional API calls to the server are made if you decide to ignore the value completely. You also don't need to open the BLOB value explicitly, as the BLOB is opened automatically on the first call to :meth:`~BlobReader.next`, :meth:`~BlobReader.read`, :meth:`~BlobReader.readline`, :meth:`~BlobReader.readlines` or :meth:`~BlobReader.seek`. However, it's good practice to :meth:`~BlobReader.close` the reader once you're finished reading, as it's likely that Python's garbage collector would call the `__del__` method too late, when the fetch context is already gone, and closing the reader would cause an error. .. warning:: @@ -1282,7 +1309,7 @@ For more information about retaining transactions, see `Firebird documentation`. -.. index:: +.. index:: SAVEPOINT pair: Transaction; SAVEPOINT Savepoints @@ -1549,6 +1576,8 @@ db1: [(1, None), (2, None), (3, None)] db2: [(1, None), (2, None), (3, None)] +.. index:: + pair: Transaction; context manager .. _transaction-context-manager: @@ -1662,24 +1691,12 @@ as nasty as they are numerous, but the essence of using asynchronous notification from C is as follows: - #. Call :c:func:`isc_event_block()` to create a formatted binary buffer that - will tell the server which events the client wants to listen for. - #. Call :c:func:`isc_que_events()` (passing the buffer created in the previous - step) to inform the server that the client is ready to receive event - notifications, and provide a callback that will be asynchronously - invoked when one or more of the registered events occurs. - #. [The thread that called :c:func:`isc_que_events()` to initiate event - listening must now do something else.] - #. When the callback is invoked (the database client library starts a - thread dedicated to this purpose), it can use the :c:func:`isc_event_counts()` - function to determine how many times each of the registered events has - occurred since the last call to :c:func:`isc_event_counts()` (if any). - #. [The callback thread should now "do its thing", which may include - communicating with the thread that called :c:func:`isc_que_events()`.] - #. When the callback thread is finished handling an event - notification, it must call :c:func:`isc_que_events()` again in order to receive - future notifications. Future notifications will invoke the callback - again, effectively "looping" the callback thread back to Step 4. + #. Call :c:func:`isc_event_block()` to create a formatted binary buffer that will tell the server which events the client wants to listen for. + #. Call :c:func:`isc_que_events()` (passing the buffer created in the previous step) to inform the server that the client is ready to receive event notifications, and provide a callback that will be asynchronously invoked when one or more of the registered events occurs. + #. [The thread that called :c:func:`isc_que_events()` to initiate event listening must now do something else.] + #. When the callback is invoked (the database client library starts a thread dedicated to this purpose), it can use the :c:func:`isc_event_counts()` function to determine how many times each of the registered events has occurred since the last call to :c:func:`isc_event_counts()` (if any). + #. 
[The callback thread should now "do its thing", which may include communicating with the thread that called :c:func:`isc_que_events()`.] + #. When the callback thread is finished handling an event notification, it must call :c:func:`isc_que_events()` again in order to receive future notifications. Future notifications will invoke the callback again, effectively "looping" the callback thread back to Step 4. API for Python developers ------------------------- @@ -1829,6 +1846,7 @@ .. currentModule:: fdb.services .. index:: Services + pair: Services; working with .. _working_with_services: @@ -1841,6 +1859,9 @@ The native Services API, though consistent, is much lower-level than a Pythonic API. If the native version were exposed directly, accomplishing a given task would probably require more Python code than scripting the traditional command-line tools. For this reason, FDB presents its own abstraction over the native API via the :mod:`fdb.services` module. +.. index:: + pair: Services; connection + Services API Connections ------------------------ @@ -1848,9 +1869,9 @@ This constructor has three keyword parameters: - :host: The network name of the computer on which the database server is running. - :user: The name of the database user under whose authority the maintenance tasks are to be performed. - :password: User’s password. +:host: The network name of the computer on which the database server is running. +:user: The name of the database user under whose authority the maintenance tasks are to be performed. +:password: User’s password. Since maintenance operations are most often initiated by an administrative user on the same computer as the database server, `host` defaults to the local computer, and `user` defaults to `SYSDBA`. @@ -1874,10 +1895,13 @@ * `Server Configuration and State`_: To get information about server configuration, active attachments or users, or to get the content of the server log. * `Database options`_: To set various database parameters like size of page cache, access mode or SQL dialect. * `Database maintenance`_: To perform backup, restore, validation or other database maintenance tasks. -* `User maintanance`_: To get or change information about users defined in security database, to create new or remove users. +* `User maintenance`_: To get or change information about users defined in the security database, and to create new or remove existing users. * `Trace service`_: To start, stop, pause/resume or list Firebird `trace sessions`. * `Text output from Services`_: Some services like `backup` or `trace` may return a significant amount of text. This output is not returned directly by the method that starts the service, but through separate methods that emulate reading from a text file, or provide :ref:`iterator protocol ` support on `Connection`.
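A minimal sketch of opening a Services connection (connection parameters are illustrative; `get_server_version` is assumed as the simplest round-trip check)::

    from fdb import services

    con = services.connect(host='localhost', user='sysdba', password='masterkey')
    print(con.get_server_version())  # version string reported by the server
    con.close()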
+.. index:: + pair: Services; server information + Server Configuration and State ------------------------------ @@ -2055,6 +2079,11 @@ >>> con.get_log() >>> log = con.readlines() + .. tip:: You can use the :mod:`fdb.log` module for parsing and further data processing. + +.. index:: + pair: Services; database options + Database options ---------------- @@ -2132,6 +2161,8 @@ # Use SQL dialect 1 >>> con.set_sql_dialect('employee',1) +.. index:: + triple: Services; Database; maintenance Database maintenance -------------------- @@ -2151,7 +2182,7 @@ :meth:`~Connection.get_statistics` Request database statistics. Report is in the same format as the output of the gstat command-line utility. This method has one required parameter, the location of the database on which to compute statistics, and six optional boolean parameters for controlling the domain of the statistics. - + This method is a so-called `Async method` that only initiates report processing. The actual report could be read by one of the many methods for `text output from Services`_ that `Connection` provides. .. note:: @@ -2165,6 +2196,8 @@ >>> con = services.connect(host='localhost', user='sysdba', password='masterkey') >>> con.get_statistics('employee') >>> stat_report = con.readlines() + + .. tip:: You can use the :mod:`fdb.gstat` module for parsing and further data processing. :meth:`~Connection.backup` @@ -2185,6 +2218,11 @@ >>> con.backup('employee', '/home/data/employee.fbk', metadata_only=True, collect_garbage=False) >>> backup_report = con.readlines() + + .. note:: + + :meth:`~Connection.backup` creates a backup file on the server host. Alternatively you can use :meth:`~Connection.local_backup` to create a backup file on the local machine. + :meth:`~Connection.restore` Request database restore from logical (GBAK) backup. Produces report about restore process. @@ -2204,6 +2242,10 @@ >>> con.restore('/home/data/employee.fbk', '/home/data/empcopy.fdb') >>> restore_report = con.readlines() + .. note:: + + :meth:`~Connection.restore` uses a backup file on the server host. Alternatively you can use :meth:`~Connection.local_restore` to use a backup file on the local machine. + :meth:`~Connection.nbackup` Perform physical (NBACKUP) database backup. @@ -2295,8 +2337,10 @@ # Mend the database >>> con.repair('employee', ignore_checksums=True, mend_database=True) +.. index:: + triple: Services; Database; users -User maintanance +User maintenance ---------------- :meth:`~Connection.get_users` @@ -2370,9 +2414,14 @@ >>> con.user_exists('NewUser') False +.. index:: + pair: Services; trace + Trace service ------------- +.. tip:: You can use the :mod:`fdb.trace` module for parsing and further data processing. + :meth:`~Connection.trace_start` Starts a new trace session. Requires trace `configuration` and returns `Session ID`. @@ -2462,6 +2511,8 @@ 'flags': ['active', ' admin', ' trace'], 'user': 'SYSDBA'}} +.. index:: + pair: Services; output Text output from Services ------------------------- @@ -2566,6 +2617,7 @@ gbak:writing names mapping gbak:closing file, committing, and finishing. 74752 bytes written +.. currentModule:: fdb.schema .. index:: pair: Database; schema @@ -2627,7 +2679,8 @@ Because information is cached once loaded, it's good to :meth:`clear ` it when it's no longer needed to conserve memory. - +.. index:: + pair: Database schema; categories Available information --------------------- @@ -2637,22 +2690,22 @@ - **Database:** :attr:`Owner name `, :attr:`default character set `, :attr:`description `, - :attr:`security class ` and whether database consist + :attr:`security class `, + :attr:`nbackup backup history ` and whether the database consists of :meth:`single or multiple files `. - **Facilities:** Available :attr:`character sets `, - :attr:`collations `, database :attr:`files ` and + :attr:`collations `, BLOB :attr:`filters `, database :attr:`files ` and :attr:`shadows `. - **User database objects:** :attr:`exceptions `, :attr:`generators `, :attr:`domains `, :attr:`tables ` and their :attr:`constraints `, :attr:`indices `, :attr:`views `, :attr:`triggers `, :attr:`procedures `, - user :attr:`roles ` and :attr:`user defined functions `. + user :attr:`roles `, :attr:`user defined functions ` and :attr:`packages `.
- **System database objects:** :attr:`generators `, :attr:`domains `, :attr:`tables ` and their constraints, :attr:`indices `, :attr:`views `, - :attr:`triggers `, :attr:`procedures ` - and :attr:`functions `. + :attr:`triggers `, :attr:`procedures `, :attr:`functions ` and :attr:`backup history `. - **Relations between objects:** Through direct links between metadata objects and :attr:`dependencies `. - **Privileges:** :attr:`All ` privileges, or privileges granted for specific @@ -2662,6 +2715,9 @@ possible to get all privileges :meth:`granted to ` specific user, role, procedure, trigger or view. +.. index:: + pair: Database schema; metadata objects + Metadata objects ---------------- @@ -2680,7 +2736,7 @@ - :meth:`~fdb.schema.BaseSchemaItem.get_quoted_name`: Returns the quoted (if necessary) name of the database object. - :meth:`~fdb.schema.BaseSchemaItem.get_dependents`: Returns a list of all database objects that :ref:`depend ` on this one. - :meth:`~fdb.schema.BaseSchemaItem.get_dependencies`: Returns a list of database objects that this object :ref:`depends ` on. -- :meth:`~fdb.schema.BaseSchemaItem.get_sql_for`: Returns :ref:`SQL command string ` for specified action on database object. +- :meth:`~fdb.schema.BaseSchemaItem.get_sql_for`: Returns the :ref:`SQL command string ` for a specified action on the database object. The following schema objects are available: :class:`~fdb.schema.Collation`, :class:`~fdb.schema.CharacterSet`, :class:`~fdb.schema.DatabaseException`, :class:`~fdb.schema.Sequence` (Generator), :class:`~fdb.schema.Domain`, @@ -2688,16 +2744,25 @@ :class:`~fdb.schema.Constraint`, :class:`~fdb.schema.View`, :class:`~fdb.schema.ViewColumn`, :class:`~fdb.schema.Trigger`, :class:`~fdb.schema.Procedure`, :class:`~fdb.schema.ProcedureParameter`, :class:`~fdb.schema.Function`, :class:`~fdb.schema.FunctionArgument`, :class:`~fdb.schema.Role`, -:class:`~fdb.schema.Dependency`, :class:`~fdb.schema.DatabaseFile` and :class:`~fdb.schema.Shadow`. +:class:`~fdb.schema.Dependency`, :class:`~fdb.schema.DatabaseFile`, :class:`~fdb.schema.Shadow`, :class:`~fdb.schema.Package`, :class:`~fdb.schema.Filter`, :class:`~fdb.schema.BackupHistory` and :class:`~fdb.schema.Privilege`. + +.. index:: + pair: visitor pattern; usage + pair: Database schema; visitor pattern .. _visitor-pattern-support: Visitor Pattern support ----------------------- -`Visitor Pattern`_ is particularly useful when you need to process various objects that need special handling in common algorithm (for example display information about them or generate SQL commands to create them in new database). Each metadata object (including :class:`~fdb.schema.Schema`) supports :meth:`~fdb.schema.BaseSchemaItem.accept_visitor` method that calls `class-specific` method on object passed to it as parameter. This method name always starts with 'visit'. For example :class:`~fdb.schema.Table` object calls `visitTable`. This 'visit*' method has one parameter - object instance that calls it. +.. versionchanged:: 2.0 -FDB provides skeleton implementation for :class:`schema Visitor object ` that implements all `visit*` methods called by schema classes as call to method `default_action` that does nothing. + Class *fdb.schema.SchemaVisitor* was replaced with the :class:`fdb.utils.Visitor` class. + +`Visitor Pattern`_ is particularly useful when you need to process various objects that need special handling in a common algorithm (for example, to display information about them or to generate SQL commands to create them in a new database).
Each metadata object (including :class:`~fdb.schema.Schema`) descends from the :class:`~fdb.utils.Visitable` class and thus supports the :meth:`~fdb.schema.BaseSchemaItem.accept` method that calls the visitor's :meth:`~fdb.utils.Visitor.visit` method. This method dispatches the call to a specific class-handling method, or to :meth:`fdb.utils.Visitor.default_action` if there is no such class-handling method defined in your visitor class. Special class-handling methods must have a name that follows the *visit_* pattern; for example, the method that should handle :class:`~fdb.schema.Table` (or its descendants) objects must be named *visit_Table*. + +.. index:: + pair: visitor pattern; example The next code uses the visitor pattern to print all DROP SQL statements necessary to drop a database object, taking its dependencies into account, i.e. it could be necessary to first drop other dependent objects before the object itself could be dropped. @@ -2705,41 +2770,29 @@ import fdb # Object dropper - class ObjectDropper(fdb.schema.SchemaVisitor): + class ObjectDropper(fdb.utils.Visitor): def __init__(self): self.seen = [] - def drop(self,obj): - self.seen = [] - obj.accept_visitor(self) - def default_action(self,obj): + def drop(self, obj): + self.seen = [] + obj.accept(self) # You can call self.visit(obj) directly here as well + def default_action(self, obj): if not obj.issystemobject() and 'drop' in obj.actions: for dependency in obj.get_dependents(): d = dependency.dependent if d and d not in self.seen: - d.accept_visitor(self) + d.accept(self) if obj not in self.seen: print(obj.get_sql_for('drop')) self.seen.append(obj) - def visitSchema(self,schema): - pass - def visitMetadataItem(self,item): - pass - def visitTableColumn(self,column): - column.table.accept_visitor(self) - def visitViewColumn(self,column): - column.view.accept_visitor(self) - def visitDependency(self,dependency): - pass - def visitConstraint(self,constraint): - pass - def visitProcedureParameter(self,param): - param.procedure.accept_visitor(self) - def visitFunctionArgument(self,arg): - arg.function.accept_visitor(self) - def visitDatabaseFile(self,dbfile): - pass - def visitShadow(self,shadow): - pass + def visit_TableColumn(self, column): + column.table.accept(self) + def visit_ViewColumn(self, column): + column.view.accept(self) + def visit_ProcedureParameter(self, param): + param.procedure.accept(self) + def visit_FunctionArgument(self, arg): + arg.function.accept(self) # Sample use: @@ -2756,6 +2809,10 @@ .. _object-dependencies: +.. index:: + pair: dependencies; working with + pair: Database schema; dependencies + Object dependencies ------------------- @@ -2763,12 +2820,31 @@ :class:`~fdb.schema.Dependency` object provides names and types of dependent/depended on database objects, and access to their respective schema Python objects as well. -.. _sql-oprations: +.. _enhanced-object-list: + +.. index:: + pair: list; enhanced + +Enhanced list of objects +------------------------ + +.. versionadded:: 2.0 + +Whenever possible, the schema module uses the enhanced :class:`~fdb.utils.ObjectList` list descendant for collections of metadata objects.
This enhanced list provides several convenient methods for advanced list processing: + +- filtering - :meth:`~fdb.utils.ObjectList.filter`, :meth:`~fdb.utils.ObjectList.ifilter` and :meth:`~fdb.utils.ObjectList.ifilterfalse` +- sorting - :meth:`~fdb.utils.ObjectList.sort` +- extracting/splitting - :meth:`~fdb.utils.ObjectList.extract` and :meth:`~fdb.utils.ObjectList.split` +- testing - :meth:`~fdb.utils.ObjectList.contains`, :meth:`~fdb.utils.ObjectList.all` and :meth:`~fdb.utils.ObjectList.any` +- reporting - :meth:`~fdb.utils.ObjectList.ecount`, :meth:`~fdb.utils.ObjectList.report` and :meth:`~fdb.utils.ObjectList.ireport` +- fast key access - :attr:`~fdb.utils.ObjectList.key`, :attr:`~fdb.utils.ObjectList.frozen`, :meth:`~fdb.utils.ObjectList.freeze` and :meth:`~fdb.utils.ObjectList.get` + +.. _sql-operations: SQL operations -------------- -FDB doesn't allow you to change database metadata directly using schema objects. Instead it supports generation of DDL SQL commands from schema objects using :meth:`~fdb.schema.BaseSchemaItem.get_sql_for` method present on all schema objects except Schema itself. DDL commands that could be generated depend on object type and context (for example it's not possible to generate DDL commands for system database objects), and list of DDL commands that could be generated for particular schema object could be obtained from its :attr:`~fdb.schema.BaseSchemaItem.actions` attribute. +FDB doesn't allow you to change database metadata directly using schema objects. Instead it supports generation of DDL SQL commands from schema objects using :meth:`~fdb.schema.BaseSchemaItem.get_sql_for` method present on all schema objects except Schema itself. DDL commands that could be generated depend on object type and context (for example it's not possible to generate all DDL commands for system database objects), and list of DDL commands that could be generated for particular schema object could be obtained from its :attr:`~fdb.schema.BaseSchemaItem.actions` attribute. Possible `actions` could be: create, recreate, create_or_alter, alter, drop, activate, deactivate, recompute and declare. Some actions require/allow additional parameters. 
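For illustration, a minimal sketch of generating DDL from a schema object (assuming an existing connection `con` to the sample `employee` database; `get_table` is assumed as the :class:`~fdb.schema.Schema` lookup helper, and `no_pk` is taken from the parameter table below)::

    table = con.schema.get_table('JOB')
    print(table.actions)                # actions available for this object
    print(table.get_sql_for('drop'))    # DROP TABLE JOB
    # Parametrized action: suppress generation of the PK constraint
    print(table.get_sql_for('create', no_pk=True))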
@@ -2791,11 +2867,21 @@ - - - + * - + - comment + - + - + - * - :class:`~fdb.schema.CharacterSet` - alter - collation - Yes - :class:`~fdb.schema.Collation` instance or collation name + * - + - comment + - + - + - * - :class:`~fdb.schema.DatabaseException` - create - @@ -2821,6 +2907,11 @@ - - - + * - + - comment + - + - + - * - :class:`~fdb.schema.Sequence` - create - @@ -2836,6 +2927,11 @@ - - - + * - + - comment + - + - + - * - :class:`~fdb.schema.Domain` - create - @@ -2871,6 +2967,11 @@ - - - + * - + - comment + - + - + - * - :class:`~fdb.schema.Constraint` - create - @@ -2906,21 +3007,51 @@ - - - + * - + - comment + - + - + - * - :class:`~fdb.schema.Table` - create - - - * - + - + - no_pk + - No + - Do not generate PK constraint + * - + - + - no_unique + - No + - Do not generate unique constraints + * - - recreate - - - * - + - + - no_pk + - No + - Do not generate PK constraint + * - + - + - no_unique + - No + - Do not generate unique constraints + * - - drop - - - + * - + - comment + - + - + - * - :class:`~fdb.schema.TableColumn` - alter - @@ -2946,6 +3077,21 @@ - expression - No - string with COMPUTED BY expression + * - + - + - restart + - No + - None or initial value + * - + - drop + - + - + - + * - + - comment + - + - + - * - :class:`~fdb.schema.View` - create - @@ -2981,11 +3127,16 @@ - - - - * - :class:`~fdb.schema.Trigger` - - create + * - + - comment - - - - + - + * - :class:`~fdb.schema.Trigger` + - create + - inactive + - No + - Create inactive trigger * - - recreate - @@ -3031,6 +3182,11 @@ - - - + * - + - comment + - + - + - * - :class:`~fdb.schema.Procedure` - create - no_code @@ -3047,10 +3203,35 @@ - No - True to suppress procedure body from output * - + - alter + - input + - No + - Input parameters + * - + - + - output + - No + - Output parameters + * - + - + - declare + - No + - Variable declarations + * - + - + - code + - Yes + - Procedure code / body + * - - drop - - - + * - + - comment + - + - + - * - :class:`~fdb.schema.Role` - create - @@ -3061,6 +3242,11 @@ - - - + * - + - comment + - + - + - * - :class:`~fdb.schema.Function` - declare - @@ -3071,6 +3257,46 @@ - - - + * - + - create + - no_code + - No + - Generate PSQL function code or not + * - + - create_or_alter + - no_code + - No + - Generate PSQL function code or not + * - + - recreate + - no_code + - No + - Generate PSQL function code or not + * - + - alter + - arguments + - No + - Function arguments + * - + - + - returns + - Yes + - Function return value + * - + - + - declare + - No + - Variable declarations + * - + - + - code + - Yes + - PSQL function body / code + * - + - comment + - + - + - * - :class:`~fdb.schema.DatabaseFile` - create - @@ -3083,9 +3309,9 @@ - * - - drop - - - - - - + - preserve + - No + - Preserve file or not * - :class:`~fdb.schema.Privilege` - grant - grantors @@ -3102,6 +3328,31 @@ - No - True to get REVOKE of GRANT/ADMIN OPTION only. Raises ProgrammingError if privilege doesn't have such option. + * - :class:`~fdb.schema.Package` + - create + - body + - No + - (bool) Generate package body + * - + - recreate + - body + - No + - (bool) Generate package body + * - + - create_or_alter + - body + - No + - (bool) Generate package body + * - + - alter + - header + - No + - (string_or_list) Package header + * - + - drop + - body + - No + - (bool) Drop only package body **Examples:** @@ -3183,6 +3434,9 @@ Out2 = 'Value' END +.. 
index:: + pair: privileges; working with + Working with user privileges ============================ @@ -3190,7 +3444,7 @@ :attr:`table `, :attr:`table column `, :attr:`view `, :attr:`view column `, :attr:`procedure ` or :attr:`role `. It's also possible to get all privileges :meth:`granted to ` specific user, role, procedure, trigger or view. :class:`~fdb.schema.Privilege` class supports :meth:`~fdb.schema.Privilege.get_sql_for` method to generate GRANT and REVOKE SQL statements for given privilege. If you want to generate grant/revoke statements for -set of privileges (for example all privileges granted on specific object or grated to specific user), it's more convenient to use function :func:`~fdb.schema.get_grants` that returns list of minimal set of SQL commands required to task. +set of privileges (for example all privileges granted on a specific object or granted to a specific user), it's more convenient to use the function :func:`~fdb.schema.get_grants` that returns the minimal set of SQL commands required for the task. **Examples:** @@ -3220,6 +3474,11 @@ Normally generated GRANT/REVOKE statements don't contain grantor's name, because GRANTED BY clause is supported only since Firebird 2.5. If you want to get GRANT/REVOKE statements including this clause, use `grantors` parameter for `get_sql_for` and `get_grants`. This parameter is a list of grantor names, and GRANTED BY clause is generated only for privileges not granted by user from this list. It's useful to suppress GRANTED BY clause for SYSDBA or database owner. +.. currentModule:: fdb.monitor + +.. index:: + pair: monitoring tables; working with + Working with monitoring tables ============================== @@ -3287,6 +3546,96 @@ Because information is cached once loaded, it's good to :meth:`clear ` it when it's no longer needed to conserve memory. +.. currentModule:: fdb + +.. _driver-hooks: + +.. index:: + pair: driver; hooks + pair: hooks; usage + +Driver hooks +============ + +.. versionadded:: 2.0 + +FDB provides an internal notification mechanism that allows installation of custom *hooks* into certain driver tasks. This mechanism consists of the following functions (a usage sketch follows the list): + +- :meth:`fdb.add_hook` that installs a hook function for the specified *hook_type*. +- :meth:`fdb.remove_hook` that uninstalls a previously installed hook function for the specified *hook_type*. +- :meth:`fdb.get_hooks` that returns a list of installed hook routines for the specified *hook_type*.
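A minimal sketch of installing and removing a hook (the hook types themselves are described below; the hook body is illustrative)::

    import fdb

    def on_attach(connection):
        # Invoked by the driver just before the new Connection is
        # returned to the application (HOOK_DATABASE_ATTACHED).
        print("database attached")

    fdb.add_hook(fdb.HOOK_DATABASE_ATTACHED, on_attach)
    con = fdb.connect(dsn='localhost:employee', user='sysdba', password='masterkey')
    fdb.remove_hook(fdb.HOOK_DATABASE_ATTACHED, on_attach)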
+ +.. index:: + pair: hooks; types + +.. index:: HOOK_API_LOADED, HOOK_DATABASE_ATTACHED, HOOK_DATABASE_ATTACH_REQUEST +.. index:: HOOK_DATABASE_DETACH_REQUEST, HOOK_DATABASE_CLOSED, HOOK_SERVICE_ATTACHED + +FDB provides the following *hook types* (exposed as constants in the *fdb* namespace): + +.. data:: HOOK_API_LOADED + + This hook is invoked once when an instance of :class:`~fdb.ibase.fbclient_API` is created. It could be used for additional initialization tasks that require the Firebird API. + + Hook routine must have signature: *hook_func(api)*. Any value returned by the hook is ignored. + +.. data:: HOOK_DATABASE_ATTACHED + + This hook is invoked just before a :class:`Connection` (or subclass) instance is returned to the client application. + + Hook routine must have signature: *hook_func(connection)*. Any value returned by the hook is ignored. + +.. data:: HOOK_DATABASE_ATTACH_REQUEST + + This hook is invoked after all parameters are preprocessed and before :class:`Connection` is created. + + Hook routine must have signature: *hook_func(dsn, dpb)* where `dpb` is a :class:`~fdb.ParameterBuffer` instance. It may return a :class:`Connection` (or subclass) instance or None. The first instance returned by any hook of this type becomes the return value of the caller function, and other hooks of the same type are not invoked. + +.. data:: HOOK_DATABASE_DETACH_REQUEST + + This hook is invoked before a connection is closed. + + Hook must have signature: *hook_func(connection)*. If any hook function returns True, the connection is not closed. + +.. data:: HOOK_DATABASE_CLOSED + + This hook is invoked after a connection is closed. + + Hook must have signature: *hook_func(connection)*. Any value returned by the hook is ignored. + +.. data:: HOOK_SERVICE_ATTACHED + + This hook is invoked before a :class:`fdb.services.Connection` instance is returned. + + Hook must have signature: *hook_func(connection)*. Any value returned by the hook is ignored. + +.. index:: + pair: hooks; invocation + +Installed hook functions are invoked by the following fdb code: + +:func:`fdb.load_api` hooks: + +- Event :data:`HOOK_API_LOADED` + +:func:`fdb.connect` hooks: + +- Event :data:`HOOK_DATABASE_ATTACH_REQUEST` +- Event :data:`HOOK_DATABASE_ATTACHED` + +:func:`fdb.create_database` hooks: + +- Event :data:`HOOK_DATABASE_ATTACHED` + +:meth:`fdb.Connection.close` hooks: + +- Event :data:`HOOK_DATABASE_DETACH_REQUEST` +- Event :data:`HOOK_DATABASE_CLOSED` + +:func:`fdb.services.connect` hooks: + +- Event :data:`HOOK_SERVICE_ATTACHED` + Binary files /tmp/tmpCHbpDN/3oqXNv4kpv/fdb-1.6.1+dfsg1/test/fbtest20.fdb and /tmp/tmpCHbpDN/gi8KZtmtUO/fdb-2.0.0/test/fbtest20.fdb differ Binary files /tmp/tmpCHbpDN/3oqXNv4kpv/fdb-1.6.1+dfsg1/test/fbtest25.fdb and /tmp/tmpCHbpDN/gi8KZtmtUO/fdb-2.0.0/test/fbtest25.fdb differ Binary files /tmp/tmpCHbpDN/3oqXNv4kpv/fdb-1.6.1+dfsg1/test/fbtest30.fdb and /tmp/tmpCHbpDN/gi8KZtmtUO/fdb-2.0.0/test/fbtest30.fdb differ diff -Nru fdb-1.6.1+dfsg1/test/gstat25-a.out fdb-2.0.0/test/gstat25-a.out --- fdb-1.6.1+dfsg1/test/gstat25-a.out 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/test/gstat25-a.out 2018-04-26 14:39:03.000000000 +0000 @@ -0,0 +1,574 @@ + +Database "/home/fdb/test/fbtest25.fdb" +Gstat execution time Wed Apr 4 15:30:10 2018 + +Database header page information: + Flags 0 + Checksum 12345 + Generation 2844 + Page size 4096 + ODS version 11.2 + Oldest transaction 204 + Oldest active 1807 + Oldest snapshot 1807 + Next transaction 1807 + Bumped transaction 1 + Sequence number 0 + Next attachment ID 1067 + Implementation ID 24 + Shadow count 0 + Page buffers 0 + Next header page 0 + Database dialect 3 + Creation date May 27, 2013 23:40:53 + Attributes force write + + Variable header data: + Sweep interval: 20000 + *END* + + +Database file sequence: +File /home/fdb/test/fbtest25.fdb is the only file + +Analyzing database pages ... 
+AR (142) + Primary pointer page: 209, Index root page: 210 + Data pages: 1, data page slots: 1, average fill: 86% + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 1 + +COUNTRY (128) + Primary pointer page: 180, Index root page: 181 + Data pages: 1, data page slots: 1, average fill: 15% + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY1 (0) + Depth: 1, leaf buckets: 1, nodes: 14 + Average data length: 6.50, total dup: 0, max dup: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +CUSTOMER (132) + Primary pointer page: 188, Index root page: 189 + Data pages: 1, data page slots: 1, average fill: 53% + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 1 + 60 - 79% = 0 + 80 - 99% = 0 + + Index CUSTNAMEX (1) + Depth: 1, leaf buckets: 1, nodes: 15 + Average data length: 15.87, total dup: 0, max dup: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index CUSTREGION (2) + Depth: 1, leaf buckets: 1, nodes: 15 + Average data length: 17.27, total dup: 0, max dup: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN23 (3) + Depth: 1, leaf buckets: 1, nodes: 15 + Average data length: 4.87, total dup: 4, max dup: 4 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY22 (0) + Depth: 1, leaf buckets: 1, nodes: 15 + Average data length: 1.13, total dup: 0, max dup: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +DEPARTMENT (130) + Primary pointer page: 184, Index root page: 185 + Data pages: 1, data page slots: 1, average fill: 47% + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 1 + 60 - 79% = 0 + 80 - 99% = 0 + + Index BUDGETX (2) + Depth: 1, leaf buckets: 1, nodes: 21 + Average data length: 5.38, total dup: 7, max dup: 3 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$4 (0) + Depth: 1, leaf buckets: 1, nodes: 21 + Average data length: 13.95, total dup: 0, max dup: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN10 (4) + Depth: 1, leaf buckets: 1, nodes: 21 + Average data length: 1.14, total dup: 3, max dup: 3 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN6 (3) + Depth: 1, leaf buckets: 1, nodes: 21 + Average data length: 0.81, total dup: 13, max dup: 4 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY5 (1) + Depth: 1, leaf buckets: 1, nodes: 21 + Average data length: 1.71, total dup: 0, max dup: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +EMPLOYEE (131) + Primary pointer page: 186, Index root page: 187 + Data pages: 2, data page slots: 2, average fill: 44% + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 0 + + Index NAMEX (1) + Depth: 1, leaf buckets: 1, nodes: 42 + Average data length: 15.52, total dup: 0, max dup: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN8 (2) + Depth: 1, leaf buckets: 1, nodes: 42 + Average data length: 0.81, total dup: 23, max dup: 4 + 
Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN9 (3)
+ Depth: 1, leaf buckets: 1, nodes: 42
+ Average data length: 6.79, total dup: 15, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY7 (0)
+ Depth: 1, leaf buckets: 1, nodes: 42
+ Average data length: 1.31, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+EMPLOYEE_PROJECT (135)
+ Primary pointer page: 195, Index root page: 196
+ Data pages: 1, data page slots: 1, average fill: 20%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN15 (1)
+ Depth: 1, leaf buckets: 1, nodes: 28
+ Average data length: 1.04, total dup: 6, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN16 (2)
+ Depth: 1, leaf buckets: 1, nodes: 28
+ Average data length: 0.86, total dup: 23, max dup: 9
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY14 (0)
+ Depth: 1, leaf buckets: 1, nodes: 28
+ Average data length: 9.11, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+JOB (129)
+ Primary pointer page: 182, Index root page: 183
+ Data pages: 3, data page slots: 3, average fill: 73%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 2
+
+ Index MAXSALX (2)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 10.90, total dup: 5, max dup: 1
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index MINSALX (1)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 10.29, total dup: 7, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN3 (3)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 1.39, total dup: 24, max dup: 20
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY2 (0)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 10.45, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+PROJECT (134)
+ Primary pointer page: 193, Index root page: 194
+ Data pages: 1, data page slots: 1, average fill: 29%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index PRODTYPEX (2)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 22.50, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$11 (0)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 13.33, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN13 (3)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 1.33, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY12 (1)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 4.83, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+PROJ_DEPT_BUDGET (136)
+ Primary pointer page: 197, Index root page: 198
+ Data pages: 1, data page slots: 1, average fill: 80%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 1
+
+ Index RDB$FOREIGN18 (1)
+ Depth: 1, leaf buckets: 1, nodes: 24
+ Average data length: 0.71, total dup: 15, max dup: 5
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN19 (2)
+ Depth: 1, leaf buckets: 1, nodes: 24
+ Average data length: 1.00, total dup: 19, max dup: 8
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY17 (0)
+ Depth: 1, leaf buckets: 1, nodes: 24
+ Average data length: 6.83, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+SALARY_HISTORY (137)
+ Primary pointer page: 199, Index root page: 200
+ Data pages: 1, data page slots: 1, average fill: 58%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index CHANGEX (2)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 0.31, total dup: 46, max dup: 21
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN21 (3)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 0.90, total dup: 16, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY20 (0)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 18.29, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index UPDATERX (1)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 0.29, total dup: 46, max dup: 28
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+SALES (138)
+ Primary pointer page: 201, Index root page: 202
+ Data pages: 1, data page slots: 1, average fill: 68%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index NEEDX (1)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 2.55, total dup: 11, max dup: 6
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index QTYX (3)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 1.85, total dup: 11, max dup: 3
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN25 (4)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 0.52, total dup: 18, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN26 (5)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 0.45, total dup: 25, max dup: 7
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY24 (0)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 4.48, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index SALESTATX (2)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 0.97, total dup: 27, max dup: 14
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T (235)
+ Primary pointer page: 205, Index root page: 282
+ Data pages: 0, data page slots: 0, average fill: 0%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY104 (0)
+ Depth: 1, leaf buckets: 1, nodes: 0
+ Average data length: 0.00, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T2 (141)
+ Primary pointer page: 207, Index root page: 208
+ Data pages: 1, data page slots: 1, average fill: 20%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T3 (139)
+ Primary pointer page: 203, Index root page: 204
+ Data pages: 0, data page slots: 0, average fill: 0%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T4 (133)
+ Primary pointer page: 191, Index root page: 192
+ Data pages: 1, data page slots: 1, average fill: 4%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
diff -Nru fdb-1.6.1+dfsg1/test/gstat25-d.out fdb-2.0.0/test/gstat25-d.out
--- fdb-1.6.1+dfsg1/test/gstat25-d.out 1970-01-01 00:00:00.000000000 +0000
+++ fdb-2.0.0/test/gstat25-d.out 2018-04-26 14:39:03.000000000 +0000
@@ -0,0 +1,184 @@
+
+Database "/home/fdb/test/fbtest25.fdb"
+Gstat execution time Wed Apr 4 15:32:25 2018
+
+Database header page information:
+ Flags 0
+ Checksum 12345
+ Generation 2856
+ Page size 4096
+ ODS version 11.2
+ Oldest transaction 204
+ Oldest active 1811
+ Oldest snapshot 1811
+ Next transaction 1811
+ Bumped transaction 1
+ Sequence number 0
+ Next attachment ID 1071
+ Implementation ID 24
+ Shadow count 0
+ Page buffers 0
+ Next header page 0
+ Database dialect 3
+ Creation date May 27, 2013 23:40:53
+ Attributes force write
+
+ Variable header data:
+ Sweep interval: 20000
+ *END*
+
+
+Database file sequence:
+File /home/fdb/test/fbtest25.fdb is the only file
+
+Analyzing database pages ...
+AR (142)
+ Primary pointer page: 209, Index root page: 210
+ Data pages: 1, data page slots: 1, average fill: 86%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 1
+
+COUNTRY (128)
+ Primary pointer page: 180, Index root page: 181
+ Data pages: 1, data page slots: 1, average fill: 15%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+CUSTOMER (132)
+ Primary pointer page: 188, Index root page: 189
+ Data pages: 1, data page slots: 1, average fill: 53%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+DEPARTMENT (130)
+ Primary pointer page: 184, Index root page: 185
+ Data pages: 1, data page slots: 1, average fill: 47%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+EMPLOYEE (131)
+ Primary pointer page: 186, Index root page: 187
+ Data pages: 2, data page slots: 2, average fill: 44%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+EMPLOYEE_PROJECT (135)
+ Primary pointer page: 195, Index root page: 196
+ Data pages: 1, data page slots: 1, average fill: 20%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+JOB (129)
+ Primary pointer page: 182, Index root page: 183
+ Data pages: 3, data page slots: 3, average fill: 73%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 2
+
+PROJECT (134)
+ Primary pointer page: 193, Index root page: 194
+ Data pages: 1, data page slots: 1, average fill: 29%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+PROJ_DEPT_BUDGET (136)
+ Primary pointer page: 197, Index root page: 198
+ Data pages: 1, data page slots: 1, average fill: 80%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 1
+
+SALARY_HISTORY (137)
+ Primary pointer page: 199, Index root page: 200
+ Data pages: 1, data page slots: 1, average fill: 58%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+SALES (138)
+ Primary pointer page: 201, Index root page: 202
+ Data pages: 1, data page slots: 1, average fill: 68%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+T (235)
+ Primary pointer page: 205, Index root page: 282
+ Data pages: 0, data page slots: 0, average fill: 0%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T2 (141)
+ Primary pointer page: 207, Index root page: 208
+ Data pages: 1, data page slots: 1, average fill: 20%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T3 (139)
+ Primary pointer page: 203, Index root page: 204
+ Data pages: 0, data page slots: 0, average fill: 0%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T4 (133)
+ Primary pointer page: 191, Index root page: 192
+ Data pages: 1, data page slots: 1, average fill: 4%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
diff -Nru fdb-1.6.1+dfsg1/test/gstat25-f.out fdb-2.0.0/test/gstat25-f.out
--- fdb-1.6.1+dfsg1/test/gstat25-f.out 1970-01-01 00:00:00.000000000 +0000
+++ fdb-2.0.0/test/gstat25-f.out 2018-04-26 14:39:03.000000000 +0000
@@ -0,0 +1,1490 @@
+
+Database "/home/fdb/test/fbtest25.fdb"
+Gstat execution time Wed Apr 4 15:47:01 2018
+
+Database header page information:
+ Flags 0
+ Checksum 12345
+ Generation 2859
+ Page size 4096
+ ODS version 11.2
+ Oldest transaction 204
+ Oldest active 1812
+ Oldest snapshot 1812
+ Next transaction 1812
+ Bumped transaction 1
+ Sequence number 0
+ Next attachment ID 1072
+ Implementation ID 24
+ Shadow count 0
+ Page buffers 0
+ Next header page 0
+ Database dialect 3
+ Creation date May 27, 2013 23:40:53
+ Attributes force write
+
+ Variable header data:
+ Sweep interval: 20000
+ *END*
+
+
+Database file sequence:
+File /home/fdb/test/fbtest25.fdb is the only file
+
+Analyzing database pages ...
+AR (142)
+ Primary pointer page: 209, Index root page: 210
+ Average record length: 22.07, total records: 15
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 86%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 1
+
+COUNTRY (128)
+ Primary pointer page: 180, Index root page: 181
+ Average record length: 26.86, total records: 14
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 15%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY1 (0)
+ Depth: 1, leaf buckets: 1, nodes: 14
+ Average data length: 6.50, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+CUSTOMER (132)
+ Primary pointer page: 188, Index root page: 189
+ Average record length: 126.47, total records: 15
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 53%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index CUSTNAMEX (1)
+ Depth: 1, leaf buckets: 1, nodes: 15
+ Average data length: 15.87, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index CUSTREGION (2)
+ Depth: 1, leaf buckets: 1, nodes: 15
+ Average data length: 17.27, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN23 (3)
+ Depth: 1, leaf buckets: 1, nodes: 15
+ Average data length: 4.87, total dup: 4, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY22 (0)
+ Depth: 1, leaf buckets: 1, nodes: 15
+ Average data length: 1.13, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+DEPARTMENT (130)
+ Primary pointer page: 184, Index root page: 185
+ Average record length: 73.62, total records: 21
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 47%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index BUDGETX (2)
+ Depth: 1, leaf buckets: 1, nodes: 21
+ Average data length: 5.38, total dup: 7, max dup: 3
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$4 (0)
+ Depth: 1, leaf buckets: 1, nodes: 21
+ Average data length: 13.95, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN10 (4)
+ Depth: 1, leaf buckets: 1, nodes: 21
+ Average data length: 1.14, total dup: 3, max dup: 3
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN6 (3)
+ Depth: 1, leaf buckets: 1, nodes: 21
+ Average data length: 0.81, total dup: 13, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY5 (1)
+ Depth: 1, leaf buckets: 1, nodes: 21
+ Average data length: 1.71, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+EMPLOYEE (131)
+ Primary pointer page: 186, Index root page: 187
+ Average record length: 68.86, total records: 42
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 2, data page slots: 2, average fill: 44%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index NAMEX (1)
+ Depth: 1, leaf buckets: 1, nodes: 42
+ Average data length: 15.52, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN8 (2)
+ Depth: 1, leaf buckets: 1, nodes: 42
+ Average data length: 0.81, total dup: 23, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN9 (3)
+ Depth: 1, leaf buckets: 1, nodes: 42
+ Average data length: 6.79, total dup: 15, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY7 (0)
+ Depth: 1, leaf buckets: 1, nodes: 42
+ Average data length: 1.31, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+EMPLOYEE_PROJECT (135)
+ Primary pointer page: 195, Index root page: 196
+ Average record length: 12.00, total records: 28
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 20%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN15 (1)
+ Depth: 1, leaf buckets: 1, nodes: 28
+ Average data length: 1.04, total dup: 6, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN16 (2)
+ Depth: 1, leaf buckets: 1, nodes: 28
+ Average data length: 0.86, total dup: 23, max dup: 9
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY14 (0)
+ Depth: 1, leaf buckets: 1, nodes: 28
+ Average data length: 9.11, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+JOB (129)
+ Primary pointer page: 182, Index root page: 183
+ Average record length: 67.13, total records: 31
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 3, data page slots: 3, average fill: 73%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 2
+
+ Index MAXSALX (2)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 10.90, total dup: 5, max dup: 1
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index MINSALX (1)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 10.29, total dup: 7, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN3 (3)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 1.39, total dup: 24, max dup: 20
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY2 (0)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 10.45, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+PROJECT (134)
+ Primary pointer page: 193, Index root page: 194
+ Average record length: 48.83, total records: 6
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 29%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index PRODTYPEX (2)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 22.50, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$11 (0)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 13.33, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN13 (3)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 1.33, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY12 (1)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 4.83, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+PROJ_DEPT_BUDGET (136)
+ Primary pointer page: 197, Index root page: 198
+ Average record length: 30.96, total records: 24
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 80%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 1
+
+ Index RDB$FOREIGN18 (1)
+ Depth: 1, leaf buckets: 1, nodes: 24
+ Average data length: 0.71, total dup: 15, max dup: 5
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN19 (2)
+ Depth: 1, leaf buckets: 1, nodes: 24
+ Average data length: 1.00, total dup: 19, max dup: 8
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY17 (0)
+ Depth: 1, leaf buckets: 1, nodes: 24
+ Average data length: 6.83, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$BACKUP_HISTORY (32)
+ Primary pointer page: 68, Index root page: 69
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 0, data page slots: 0, average fill: 0%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_44 (0)
+ Depth: 1, leaf buckets: 1, nodes: 0
+ Average data length: 0.00, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$CHARACTER_SETS (28)
+ Primary pointer page: 60, Index root page: 61
+ Average record length: 35.67, total records: 52
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 69%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index RDB$INDEX_19 (0)
+ Depth: 1, leaf buckets: 1, nodes: 52
+ Average data length: 2.98, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_25 (1)
+ Depth: 1, leaf buckets: 1, nodes: 52
+ Average data length: 1.04, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$CHECK_CONSTRAINTS (24)
+ Primary pointer page: 52, Index root page: 53
+ Average record length: 25.77, total records: 70
+ Average version length: 21.00, total versions: 1, max versions: 1
+ Data pages: 2, data page slots: 2, average fill: 37%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index RDB$INDEX_14 (0)
+ Depth: 1, leaf buckets: 1, nodes: 70
+ Average data length: 0.90, total dup: 14, max dup: 1
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_40 (1)
+ Depth: 1, leaf buckets: 1, nodes: 70
+ Average data length: 3.81, total dup: 11, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$COLLATIONS (29)
+ Primary pointer page: 62, Index root page: 63
+ Average record length: 25.87, total records: 149
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 3, data page slots: 3, average fill: 55%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 2
+ 80 - 99% = 0
+
+ Index RDB$INDEX_20 (0)
+ Depth: 1, leaf buckets: 1, nodes: 149
+ Average data length: 3.77, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_26 (1)
+ Depth: 1, leaf buckets: 1, nodes: 149
+ Average data length: 1.79, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$DATABASE (1)
+ Primary pointer page: 6, Index root page: 7
+ Average record length: 15.00, total records: 1
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 1%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$DEPENDENCIES (13)
+ Primary pointer page: 30, Index root page: 31
+ Average record length: 42.63, total records: 163
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 5, data page slots: 6, average fill: 49%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 1
+ 40 - 59% = 1
+ 60 - 79% = 2
+ 80 - 99% = 0
+
+ Index RDB$INDEX_27 (0)
+ Depth: 1, leaf buckets: 1, nodes: 163
+ Average data length: 1.18, total dup: 118, max dup: 13
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_28 (1)
+ Depth: 1, leaf buckets: 1, nodes: 163
+ Average data length: 1.01, total dup: 145, max dup: 36
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$EXCEPTIONS (30)
+ Primary pointer page: 64, Index root page: 65
+ Average record length: 82.40, total records: 5
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 12%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_23 (0)
+ Depth: 1, leaf buckets: 1, nodes: 5
+ Average data length: 14.00, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_24 (1)
+ Depth: 1, leaf buckets: 1, nodes: 5
+ Average data length: 1.20, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FIELDS (2)
+ Primary pointer page: 8, Index root page: 9
+ Average record length: 35.91, total records: 245
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 6, data page slots: 6, average fill: 62%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 4
+ 80 - 99% = 1
+
+ Index RDB$INDEX_2 (0)
+ Depth: 1, leaf buckets: 1, nodes: 245
+ Average data length: 4.58, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FIELD_DIMENSIONS (21)
+ Primary pointer page: 46, Index root page: 47
+ Average record length: 23.84, total records: 19
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 19%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_36 (0)
+ Depth: 1, leaf buckets: 1, nodes: 19
+ Average data length: 1.26, total dup: 3, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FILES (10)
+ Primary pointer page: 24, Index root page: 25
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 0, data page slots: 0, average fill: 0%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FILTERS (16)
+ Primary pointer page: 36, Index root page: 37
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 0, data page slots: 0, average fill: 0%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_17 (0)
+ Depth: 1, leaf buckets: 1, nodes: 0
+ Average data length: 0.00, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_45 (1)
+ Depth: 1, leaf buckets: 1, nodes: 0
+ Average data length: 0.00, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FORMATS (8)
+ Primary pointer page: 20, Index root page: 21
+ Average record length: 15.79, total records: 19
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 76%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index RDB$INDEX_16 (0)
+ Depth: 1, leaf buckets: 1, nodes: 19
+ Average data length: 4.63, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FUNCTIONS (14)
+ Primary pointer page: 32, Index root page: 33
+ Average record length: 63.00, total records: 2
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 4%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_9 (0)
+ Depth: 1, leaf buckets: 1, nodes: 2
+ Average data length: 13.00, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FUNCTION_ARGUMENTS (15)
+ Primary pointer page: 34, Index root page: 35
+ Average record length: 37.00, total records: 7
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 9%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_10 (0)
+ Depth: 1, leaf buckets: 1, nodes: 7
+ Average data length: 3.71, total dup: 5, max dup: 3
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$GENERATORS (20)
+ Primary pointer page: 44, Index root page: 45
+ Average record length: 31.91, total records: 11
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 22%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_11 (0)
+ Depth: 1, leaf buckets: 1, nodes: 11
+ Average data length: 11.91, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_46 (1)
+ Depth: 1, leaf buckets: 1, nodes: 11
+ Average data length: 1.09, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$INDEX_SEGMENTS (3)
+ Primary pointer page: 10, Index root page: 11
+ Average record length: 29.14, total records: 150
+ Average version length: 27.11, total versions: 38, max versions: 1
+ Data pages: 3, data page slots: 3, average fill: 79%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 2
+ 80 - 99% = 1
+
+ Index RDB$INDEX_6 (0)
+ Depth: 1, leaf buckets: 1, nodes: 150
+ Average data length: 1.50, total dup: 24, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$INDICES (4)
+ Primary pointer page: 12, Index root page: 13
+ Average record length: 49.20, total records: 88
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 4, data page slots: 4, average fill: 48%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 2
+ 80 - 99% = 0
+
+ Index RDB$INDEX_31 (1)
+ Depth: 1, leaf buckets: 1, nodes: 88
+ Average data length: 4.23, total dup: 48, max dup: 5
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_41 (2)
+ Depth: 1, leaf buckets: 1, nodes: 88
+ Average data length: 0.19, total dup: 81, max dup: 73
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_5 (0)
+ Depth: 1, leaf buckets: 1, nodes: 88
+ Average data length: 2.09, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$LOG_FILES (25)
+ Primary pointer page: 54, Index root page: 55
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 0, data page slots: 0, average fill: 0%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$PAGES (0)
+ Primary pointer page: 3, Index root page: 4
+ Average record length: 14.84, total records: 98
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 2, data page slots: 2, average fill: 38%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$PROCEDURES (26)
+ Primary pointer page: 56, Index root page: 57
+ Average record length: 62.20, total records: 10
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 3, data page slots: 3, average fill: 94%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 3
+
+ Index RDB$INDEX_21 (0)
+ Depth: 1, leaf buckets: 1, nodes: 10
+ Average data length: 10.60, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_22 (1)
+ Depth: 1, leaf buckets: 1, nodes: 10
+ Average data length: 1.10, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$PROCEDURE_PARAMETERS (27)
+ Primary pointer page: 58, Index root page: 59
+ Average record length: 41.73, total records: 33
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 48%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_18 (0)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 11.33, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_47 (1)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 1.24, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_48 (2)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 0.00, total dup: 32, max dup: 32
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$REF_CONSTRAINTS (23)
+ Primary pointer page: 50, Index root page: 51
+ Average record length: 54.86, total records: 14
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 25%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_13 (0)
+ Depth: 1, leaf buckets: 1, nodes: 14
+ Average data length: 1.93, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$RELATIONS (6)
+ Primary pointer page: 16, Index root page: 17
+ Average record length: 62.38, total records: 58
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 5, data page slots: 6, average fill: 71%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 2
+ 60 - 79% = 1
+ 80 - 99% = 2
+
+ Index RDB$INDEX_0 (0)
+ Depth: 1, leaf buckets: 1, nodes: 58
+ Average data length: 8.81, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_1 (1)
+ Depth: 1, leaf buckets: 1, nodes: 73
+ Average data length: 0.82, total dup: 14, max dup: 14
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$RELATION_CONSTRAINTS (22)
+ Primary pointer page: 48, Index root page: 49
+ Average record length: 49.76, total records: 82
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 2, data page slots: 2, average fill: 67%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 2
+ 80 - 99% = 0
+
+ Index RDB$INDEX_12 (0)
+ Depth: 1, leaf buckets: 1, nodes: 82
+ Average data length: 1.07, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_42 (1)
+ Depth: 1, leaf buckets: 1, nodes: 82
+ Average data length: 6.43, total dup: 43, max dup: 8
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_43 (2)
+ Depth: 1, leaf buckets: 1, nodes: 82
+ Average data length: 0.60, total dup: 54, max dup: 54
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$RELATION_FIELDS (5)
+ Primary pointer page: 14, Index root page: 15
+ Average record length: 68.27, total records: 466
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 13, data page slots: 13, average fill: 77%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 9
+ 80 - 99% = 3
+
+ Index RDB$INDEX_15 (2)
+ Depth: 2, leaf buckets: 4, nodes: 466
+ Average data length: 20.92, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 1
+ 80 - 99% = 2
+
+ Index RDB$INDEX_3 (0)
+ Depth: 1, leaf buckets: 1, nodes: 466
+ Average data length: 2.33, total dup: 255, max dup: 31
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index RDB$INDEX_4 (1)
+ Depth: 1, leaf buckets: 1, nodes: 466
+ Average data length: 1.10, total dup: 408, max dup: 27
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$ROLES (31)
+ Primary pointer page: 66, Index root page: 67
+ Average record length: 28.50, total records: 2
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 2%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_39 (0)
+ Depth: 1, leaf buckets: 1, nodes: 2
+ Average data length: 9.00, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$SECURITY_CLASSES (9)
+ Primary pointer page: 22, Index root page: 23
+ Average record length: 22.54, total records: 182
+ Average version length: 24.43, total versions: 42, max versions: 1
+ Data pages: 6, data page slots: 6, average fill: 77%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 1
+ 80 - 99% = 4
+
+ Index RDB$INDEX_7 (0)
+ Depth: 1, leaf buckets: 1, nodes: 182
+ Average data length: 1.06, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$TRANSACTIONS (19)
+ Primary pointer page: 42, Index root page: 43
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 0, data page slots: 0, average fill: 0%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_32 (0)
+ Depth: 1, leaf buckets: 1, nodes: 0
+ Average data length: 0.00, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$TRIGGERS (12)
+ Primary pointer page: 28, Index root page: 29
+ Average record length: 56.72, total records: 69
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 7, data page slots: 7, average fill: 90%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 2
+ 80 - 99% = 5
+
+ Index RDB$INDEX_38 (1)
+ Depth: 1, leaf buckets: 1, nodes: 69
+ Average data length: 2.84, total dup: 48, max dup: 18
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_8 (0)
+ Depth: 1, leaf buckets: 1, nodes: 69
+ Average data length: 2.09, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$TRIGGER_MESSAGES (17)
+ Primary pointer page: 38, Index root page: 39
+ Average record length: 60.11, total records: 36
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 68%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index RDB$INDEX_35 (0)
+ Depth: 1, leaf buckets: 1, nodes: 36
+ Average data length: 1.00, total dup: 12, max dup: 5
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$TYPES (11)
+ Primary pointer page: 26, Index root page: 27
+ Average record length: 45.56, total records: 228
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 5, data page slots: 5, average fill: 70%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 5
+ 80 - 99% = 0
+
+ Index RDB$INDEX_37 (0)
+ Depth: 1, leaf buckets: 1, nodes: 228
+ Average data length: 4.22, total dup: 16, max dup: 1
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$USER_PRIVILEGES (18)
+ Primary pointer page: 40, Index root page: 41
+ Average record length: 45.69, total records: 173
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 4, data page slots: 4, average fill: 67%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 3
+ 80 - 99% = 0
+
+ Index RDB$INDEX_29 (0)
+ Depth: 1, leaf buckets: 1, nodes: 173
+ Average data length: 1.24, total dup: 144, max dup: 9
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_30 (1)
+ Depth: 1, leaf buckets: 1, nodes: 173
+ Average data length: 0.07, total dup: 171, max dup: 104
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$VIEW_RELATIONS (7)
+ Primary pointer page: 18, Index root page: 19
+ Average record length: 45.00, total records: 2
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 3%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_33 (0)
+ Depth: 1, leaf buckets: 1, nodes: 2
+ Average data length: 5.00, total dup: 1, max dup: 1
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_34 (1)
+ Depth: 1, leaf buckets: 1, nodes: 2
+ Average data length: 9.00, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+SALARY_HISTORY (137)
+ Primary pointer page: 199, Index root page: 200
+ Average record length: 31.51, total records: 49
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 58%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index CHANGEX (2)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 0.31, total dup: 46, max dup: 21
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN21 (3)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 0.90, total dup: 16, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY20 (0)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 18.29, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index UPDATERX (1)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 0.29, total dup: 46, max dup: 28
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+SALES (138)
+ Primary pointer page: 201, Index root page: 202
+ Average record length: 67.24, total records: 33
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 68%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index NEEDX (1)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 2.55, total dup: 11, max dup: 6
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index QTYX (3)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 1.85, total dup: 11, max dup: 3
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN25 (4)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 0.52, total dup: 18, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN26 (5)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 0.45, total dup: 25, max dup: 7
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY24 (0)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 4.48, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index SALESTATX (2)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 0.97, total dup: 27, max dup: 14
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T (235)
+ Primary pointer page: 205, Index root page: 282
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 0, data page slots: 0, average fill: 0%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY104 (0)
+ Depth: 1, leaf buckets: 1, nodes: 0
+ Average data length: 0.00, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T2 (141)
+ Primary pointer page: 207, Index root page: 208
+ Average record length: 0.00, total records: 2
+ Average version length: 17.00, total versions: 2, max versions: 1
+ Data pages: 1, data page slots: 1, average fill: 20%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T3 (139)
+ Primary pointer page: 203, Index root page: 204
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 0, data page slots: 0, average fill: 0%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T4 (133)
+ Primary pointer page: 191, Index root page: 192
+ Average record length: 0.00, total records: 1
+ Average version length: 129.00, total versions: 1, max versions: 1
+ Data pages: 1, data page slots: 1, average fill: 4%
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
diff -Nru fdb-1.6.1+dfsg1/test/gstat25-h.out fdb-2.0.0/test/gstat25-h.out
--- fdb-1.6.1+dfsg1/test/gstat25-h.out 1970-01-01 00:00:00.000000000 +0000
+++ fdb-2.0.0/test/gstat25-h.out 2018-04-26 14:39:03.000000000 +0000
@@ -0,0 +1,28 @@
+
+Database "/home/fdb/test/fbtest25.fdb"
+Gstat execution time Wed Apr 4 15:29:10 2018
+
+Database header page information:
+ Flags 0
+ Checksum 12345
+ Generation 2844
+ Page size 4096
+ ODS version 11.2
+ Oldest transaction 204
+ Oldest active 1807
+ Oldest snapshot 1807
+ Next transaction 1807
+ Bumped transaction 1
+ Sequence number 0
+ Next attachment ID 1067
+ Implementation ID 24
+ Shadow count 0
+ Page buffers 0
+ Next header page 0
+ Database dialect 3
+ Creation date May 27, 2013 23:40:53
+ Attributes force write
+
+ Variable header data:
+ Sweep interval: 20000
+ *END*
diff -Nru fdb-1.6.1+dfsg1/test/gstat25-i.out fdb-2.0.0/test/gstat25-i.out
--- fdb-1.6.1+dfsg1/test/gstat25-i.out 1970-01-01 00:00:00.000000000 +0000
+++ fdb-2.0.0/test/gstat25-i.out 2018-04-26 14:39:03.000000000 +0000
@@ -0,0 +1,454 @@
+
+Database "/home/fdb/test/fbtest25.fdb"
+Gstat execution time Wed Apr 4 15:32:12 2018
+
+Database header page information:
+ Flags 0
+ Checksum 12345
+ Generation 2853
+ Page size 4096
+ ODS version 11.2
+ Oldest transaction 204
+ Oldest active 1810
+ Oldest snapshot 1810
+ Next transaction 1810
+ Bumped transaction 1
+ Sequence number 0
+ Next attachment ID 1070
+ Implementation ID 24
+ Shadow count 0
+ Page buffers 0
+ Next header page 0
+ Database dialect 3
+ Creation date May 27, 2013 23:40:53
+ Attributes force write
+
+ Variable header data:
+ Sweep interval: 20000
+ *END*
+
+
+Database file sequence:
+File /home/fdb/test/fbtest25.fdb is the only file
+
+Analyzing database pages ...
+AR (142)
+
+COUNTRY (128)
+
+ Index RDB$PRIMARY1 (0)
+ Depth: 1, leaf buckets: 1, nodes: 14
+ Average data length: 6.50, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+CUSTOMER (132)
+
+ Index CUSTNAMEX (1)
+ Depth: 1, leaf buckets: 1, nodes: 15
+ Average data length: 15.87, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index CUSTREGION (2)
+ Depth: 1, leaf buckets: 1, nodes: 15
+ Average data length: 17.27, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN23 (3)
+ Depth: 1, leaf buckets: 1, nodes: 15
+ Average data length: 4.87, total dup: 4, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY22 (0)
+ Depth: 1, leaf buckets: 1, nodes: 15
+ Average data length: 1.13, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+DEPARTMENT (130)
+
+ Index BUDGETX (2)
+ Depth: 1, leaf buckets: 1, nodes: 21
+ Average data length: 5.38, total dup: 7, max dup: 3
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$4 (0)
+ Depth: 1, leaf buckets: 1, nodes: 21
+ Average data length: 13.95, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN10 (4)
+ Depth: 1, leaf buckets: 1, nodes: 21
+ Average data length: 1.14, total dup: 3, max dup: 3
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN6 (3)
+ Depth: 1, leaf buckets: 1, nodes: 21
+ Average data length: 0.81, total dup: 13, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY5 (1)
+ Depth: 1, leaf buckets: 1, nodes: 21
+ Average data length: 1.71, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+EMPLOYEE (131)
+
+ Index NAMEX (1)
+ Depth: 1, leaf buckets: 1, nodes: 42
+ Average data length: 15.52, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN8 (2)
+ Depth: 1, leaf buckets: 1, nodes: 42
+ Average data length: 0.81, total dup: 23, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN9 (3)
+ Depth: 1, leaf buckets: 1, nodes: 42
+ Average data length: 6.79, total dup: 15, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY7 (0)
+ Depth: 1, leaf buckets: 1, nodes: 42
+ Average data length: 1.31, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+EMPLOYEE_PROJECT (135)
+
+ Index RDB$FOREIGN15 (1)
+ Depth: 1, leaf buckets: 1, nodes: 28
+ Average data length: 1.04, total dup: 6, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN16 (2)
+ Depth: 1, leaf buckets: 1, nodes: 28
+ Average data length: 0.86, total dup: 23, max dup: 9
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY14 (0)
+ Depth: 1, leaf buckets: 1, nodes: 28
+ Average data length: 9.11, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+JOB (129)
+
+ Index MAXSALX (2)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 10.90, total dup: 5, max dup: 1
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index MINSALX (1)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 10.29, total dup: 7, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN3 (3)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 1.39, total dup: 24, max dup: 20
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY2 (0)
+ Depth: 1, leaf buckets: 1, nodes: 31
+ Average data length: 10.45, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+PROJECT (134)
+
+ Index PRODTYPEX (2)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 22.50, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$11 (0)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 13.33, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN13 (3)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 1.33, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY12 (1)
+ Depth: 1, leaf buckets: 1, nodes: 6
+ Average data length: 4.83, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+PROJ_DEPT_BUDGET (136)
+
+ Index RDB$FOREIGN18 (1)
+ Depth: 1, leaf buckets: 1, nodes: 24
+ Average data length: 0.71, total dup: 15, max dup: 5
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN19 (2)
+ Depth: 1, leaf buckets: 1, nodes: 24
+ Average data length: 1.00, total dup: 19, max dup: 8
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY17 (0)
+ Depth: 1, leaf buckets: 1, nodes: 24
+ Average data length: 6.83, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+SALARY_HISTORY (137)
+
+ Index CHANGEX (2)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 0.31, total dup: 46, max dup: 21
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN21 (3)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 0.90, total dup: 16, max dup: 2
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY20 (0)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 18.29, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index UPDATERX (1)
+ Depth: 1, leaf buckets: 1, nodes: 49
+ Average data length: 0.29, total dup: 46, max dup: 28
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+SALES (138)
+
+ Index NEEDX (1)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 2.55, total dup: 11, max dup: 6
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index QTYX (3)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 1.85, total dup: 11, max dup: 3
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN25 (4)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 0.52, total dup: 18, max dup: 4
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN26 (5)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 0.45, total dup: 25, max dup: 7
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY24 (0)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 4.48, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index SALESTATX (2)
+ Depth: 1, leaf buckets: 1, nodes: 33
+ Average data length: 0.97, total dup: 27, max dup: 14
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T (235)
+
+ Index RDB$PRIMARY104 (0)
+ Depth: 1, leaf buckets: 1, nodes: 0
+ Average data length: 0.00, total dup: 0, max dup: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T2 (141)
+
+T3 (139)
+
+T4 (133)
+
diff -Nru fdb-1.6.1+dfsg1/test/gstat25-r.out fdb-2.0.0/test/gstat25-r.out
--- fdb-1.6.1+dfsg1/test/gstat25-r.out 1970-01-01 00:00:00.000000000 +0000
+++ fdb-2.0.0/test/gstat25-r.out 2018-04-26 14:39:03.000000000 +0000
@@ -0,0 +1,604 @@
+
+Database "/home/fdb/test/fbtest25.fdb"
+Gstat execution time Wed Apr 4 15:31:28 2018
+
+Database header page information:
+ Flags 0
+ Checksum 12345
+ Generation 2850
+ Page size 4096
+ ODS version 11.2
+ Oldest transaction 204
+ Oldest active 1809
+ Oldest snapshot 1809
+ Next transaction 1809
+ Bumped transaction 1
+ Sequence number 0
+ Next attachment ID 1069
+ Implementation ID 24
+ Shadow count 0
+ Page buffers 0
+ Next header page 0
+ Database dialect 3
+ Creation date May 27, 2013 23:40:53
+ Attributes force write
+
+ Variable header data:
+ Sweep interval: 20000
+ *END*
+
+
+Database file sequence:
+File /home/fdb/test/fbtest25.fdb is the only file
+
+Analyzing database pages ...
+AR (142)
+ Primary pointer page: 209, Index root page: 210
+ Average record length: 22.07, total records: 15
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Data pages: 1, data page slots: 1, average fill: 86%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 1
+
+ [... record, version, data-page and per-index statistics in the same format for COUNTRY (128), CUSTOMER (132), DEPARTMENT (130), EMPLOYEE (131), EMPLOYEE_PROJECT (135), JOB (129), PROJECT (134), PROJ_DEPT_BUDGET (136), SALARY_HISTORY (137), SALES (138), T (235), T2 (141), T3 (139) and T4 (133) ...]
+
diff -Nru fdb-1.6.1+dfsg1/test/gstat25-s.out fdb-2.0.0/test/gstat25-s.out
--- fdb-1.6.1+dfsg1/test/gstat25-s.out 1970-01-01 00:00:00.000000000 +0000
+++ fdb-2.0.0/test/gstat25-s.out 2018-04-26 14:39:03.000000000 +0000
@@ -0,0 +1,1394 @@
+
+Database "/home/fdb/test/fbtest25.fdb"
+Gstat execution time Wed Apr 4 15:30:46 2018
+
+Database header page information:
+ Flags 0
+ Checksum 12345
+ Generation 2847
+ Page size 4096
+ ODS version 11.2
+ Oldest transaction 204
+ Oldest active 1808
+ Oldest snapshot 1808
+ Next transaction 1808
+ Bumped transaction 1
+ Sequence number 0
+ Next attachment ID 1068
+ Implementation ID 24
+ Shadow count 0
+ Page buffers 0
+ Next header page 0
+ Database dialect 3
+ Creation date May 27, 2013 23:40:53
+ Attributes force write
+
+ Variable header data:
+ Sweep interval: 20000
+ *END*
+
+
+Database file sequence:
+File /home/fdb/test/fbtest25.fdb is the only file
+
+Analyzing database pages ...
+AR (142)
+ Primary pointer page: 209, Index root page: 210
+ Data pages: 1, data page slots: 1, average fill: 86%
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 1
+
+ [... data-page and per-index statistics in the same format for the user tables COUNTRY (128), CUSTOMER (132), DEPARTMENT (130), EMPLOYEE (131), EMPLOYEE_PROJECT (135), JOB (129), PROJECT (134) and PROJ_DEPT_BUDGET (136); for the system tables RDB$BACKUP_HISTORY, RDB$CHARACTER_SETS, RDB$CHECK_CONSTRAINTS, RDB$COLLATIONS, RDB$DATABASE, RDB$DEPENDENCIES, RDB$EXCEPTIONS, RDB$FIELDS, RDB$FIELD_DIMENSIONS, RDB$FILES, RDB$FILTERS, RDB$FORMATS, RDB$FUNCTIONS, RDB$FUNCTION_ARGUMENTS, RDB$GENERATORS, RDB$INDEX_SEGMENTS, RDB$INDICES, RDB$LOG_FILES, RDB$PAGES, RDB$PROCEDURES, RDB$PROCEDURE_PARAMETERS, RDB$REF_CONSTRAINTS, RDB$RELATIONS, RDB$RELATION_CONSTRAINTS, RDB$RELATION_FIELDS, RDB$ROLES, RDB$SECURITY_CLASSES, RDB$TRANSACTIONS, RDB$TRIGGERS, RDB$TRIGGER_MESSAGES, RDB$TYPES, RDB$USER_PRIVILEGES and RDB$VIEW_RELATIONS; and for SALARY_HISTORY (137), SALES (138), T (235), T2 (141), T3 (139) and T4 (133) ...]
+
diff -Nru fdb-1.6.1+dfsg1/test/gstat30-a.out fdb-2.0.0/test/gstat30-a.out
--- fdb-1.6.1+dfsg1/test/gstat30-a.out 1970-01-01 00:00:00.000000000 +0000
+++ fdb-2.0.0/test/gstat30-a.out 2018-04-26 14:39:03.000000000 +0000
@@ -0,0 +1,750 @@
+
+Database "/home/fdb/test/FBTEST30.FDB"
+Gstat execution time Wed Apr 04 15:42:00 2018
+
+Database header page information:
+ Flags 0
+ Generation 2176
+ System Change Number 24
+ Page size 8192
+ ODS version 12.0
+ Oldest transaction 179
+ Oldest active 2140
+ Oldest snapshot 2140
+ Next transaction 2141
+ Sequence number 0
+ Next attachment ID 1199
+ Implementation HW=AMD/Intel/x64 little-endian OS=Linux CC=gcc
+ Shadow count 0
+ Page buffers 0
+ Next header page 0
+ Database dialect 3
+ Creation date Nov 27, 2015 11:19:39
+ Attributes force write
+
+ Variable header data:
+ Database backup GUID: {F978F787-7023-4C4A-F79D-8D86645B0487}
+ *END*
+
+
+Database file sequence:
+File /home/fdb/test/FBTEST30.FDB is the only file
+
+Analyzing database pages ...
+AR (140)
+ Primary pointer page: 297, Index root page: 299
+ Pointer pages: 1, data page slots: 3
+ Data pages: 3, average fill: 86%
+ Primary pages: 1, secondary pages: 2, swept pages: 0
+ Empty pages: 0, full pages: 1
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 2
+
+ [... page and per-index statistics in the same extended ODS 12 format (root page, depth, node/key/prefix/data lengths, compression ratio, clustering factor, fill distribution) for COUNTRY (128), CUSTOMER (137), DEPARTMENT (130), EMPLOYEE (131), EMPLOYEE_PROJECT (134), JOB (129), PROJECT (133), PROJ_DEPT_BUDGET (135), SALARY_HISTORY (136), SALES (138), T (147), T2 (142), T3 (143), T4 (144) and T5 (145) with index RDB$PRIMARY28 ...]
+
+Gstat completion time Wed Apr 04 15:42:00 2018
+
diff -Nru fdb-1.6.1+dfsg1/test/gstat30-d.out fdb-2.0.0/test/gstat30-d.out
--- fdb-1.6.1+dfsg1/test/gstat30-d.out 1970-01-01 00:00:00.000000000 +0000
+++ fdb-2.0.0/test/gstat30-d.out 2018-04-26 14:39:03.000000000 +0000
@@ -0,0 +1,243 @@
+
+Database "/home/fdb/test/FBTEST30.FDB"
+Gstat execution time Wed Apr 04 15:42:12 2018
+
+Database header page information:
+ Flags 0
+ Generation 2177
+ System Change Number 24
+ Page size 8192
+ ODS version 12.0
+ Oldest transaction 179
+ Oldest active 2142
+ Oldest snapshot 2142
+ Next transaction 2142
+ Sequence number 0
+ Next attachment ID 1202
+ Implementation HW=AMD/Intel/x64 little-endian OS=Linux CC=gcc
+ Shadow count 0
+ Page buffers 0
+ Next header page 0
+ Database dialect 3
+ Creation date Nov 27, 2015 11:19:39
+ Attributes force write
+
+ Variable header data:
+ Database backup GUID: {F978F787-7023-4C4A-F79D-8D86645B0487}
+ *END*
+
+
+Database file sequence:
+File /home/fdb/test/FBTEST30.FDB is the only file
+
+Analyzing database pages ...
+AR (140)
+ Primary pointer page: 297, Index root page: 299
+ Pointer pages: 1, data page slots: 3
+ Data pages: 3, average fill: 86%
+ Primary pages: 1, secondary pages: 2, swept pages: 0
+ Empty pages: 0, full pages: 1
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 2
+
+COUNTRY (128)
+ Primary pointer page: 182, Index root page: 183
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 8%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+CUSTOMER (137)
+ Primary pointer page: 261, Index root page: 262
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 26%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+DEPARTMENT (130)
+ Primary pointer page: 198, Index root page: 199
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 24%
+ Primary pages: 1, secondary pages: 0, swept pages: 1
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+EMPLOYEE (131)
+ Primary pointer page: 212, Index root page: 213
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 44%
+ Primary pages: 1, secondary pages: 0, swept pages: 1
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+EMPLOYEE_PROJECT (134)
+ Primary pointer page: 234, Index root page: 235
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 10%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+JOB (129)
+ Primary pointer page: 189, Index root page: 190
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 54%
+ Primary pages: 1, secondary pages: 1, swept pages: 1
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+PROJECT (133)
+ Primary pointer page: 220, Index root page: 221
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 7%
+ Primary pages: 1, secondary pages: 1, swept pages: 1
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 2
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+PROJ_DEPT_BUDGET (135)
+ Primary pointer page: 239, Index root page: 248
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 20%
+ Primary pages: 1, secondary pages: 1, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+SALARY_HISTORY (136)
+ Primary pointer page: 253, Index root page: 254
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 30%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+SALES (138)
+ Primary pointer page: 267, Index root page: 268
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 35%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T (147)
+ Primary pointer page: 323, Index root page: 324
+ Pointer pages: 1, data page slots: 0
+ Data pages: 0, average fill: 0%
+ Primary pages: 0, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T2 (142)
+ Primary pointer page: 302, Index root page: 303
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 8%
+ Primary pages: 1, secondary pages: 1, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 2
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T3 (143)
+ Primary pointer page: 305, Index root page: 306
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 3%
+ Primary pages: 1, secondary pages: 1, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 2
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T4 (144)
+ Primary pointer page: 307, Index root page: 308
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 3%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+T5 (145)
+ Primary pointer page: 315, Index root page: 316
+ Pointer pages: 1, data page slots: 0
+ Data pages: 0, average fill: 0%
+ Primary pages: 0, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+Gstat completion time Wed Apr 04 15:42:12 2018
+
diff -Nru fdb-1.6.1+dfsg1/test/gstat30-e.out fdb-2.0.0/test/gstat30-e.out
--- fdb-1.6.1+dfsg1/test/gstat30-e.out	1970-01-01 00:00:00.000000000 +0000
+++ fdb-2.0.0/test/gstat30-e.out	2018-04-26 14:39:03.000000000 +0000
@@ -0,0 +1,33 @@
+
+Database "/home/fdb/test/FBTEST30.FDB"
+Gstat execution time Wed Apr 04 15:45:06 2018
+
+Database header page information:
+ Flags 0
+ Generation 2181
+ System Change Number 24
+ Page size 8192
+ ODS version 12.0
+ Oldest transaction 179
+ Oldest active 2146
+ Oldest snapshot 2146
+ Next transaction 2146
+ Sequence number 0
+ Next attachment ID 1214
+ Implementation HW=AMD/Intel/x64 little-endian OS=Linux CC=gcc
+ Shadow count 0
+ Page buffers 0
+ Next header page 0
+ Database dialect 3
+ Creation date Nov 27, 2015 11:19:39
+ Attributes force write
+
+ Variable header data:
+ Database backup GUID: {F978F787-7023-4C4A-F79D-8D86645B0487}
+ *END*
+
+Data pages: total 121, encrypted 0, non-crypted 121
+Index pages: total 96, encrypted 0, non-crypted 96
+Blob pages: total 11, encrypted 0, non-crypted 11
+Gstat completion time Wed Apr 04 15:45:06 2018
+
diff -Nru fdb-1.6.1+dfsg1/test/gstat30-f.out fdb-2.0.0/test/gstat30-f.out
--- fdb-1.6.1+dfsg1/test/gstat30-f.out	1970-01-01 00:00:00.000000000 +0000
+++ fdb-2.0.0/test/gstat30-f.out	2018-04-26 14:39:03.000000000 +0000
@@ -0,0 +1,2214 @@
+
+Database "/home/fdb/test/FBTEST30.FDB"
+Gstat execution time Wed Apr 04 15:46:31 2018
+
+Database header page information:
+ Flags 0
+ Generation 2181
+ System Change Number 24
+ Page size 8192
+ ODS version 12.0
+ Oldest transaction 179
+ Oldest active 2146
+ Oldest snapshot 2146
+ Next transaction 2146
+ Sequence number 0
+ Next attachment ID 1214
+ Implementation HW=AMD/Intel/x64 little-endian OS=Linux CC=gcc
+ Shadow count 0
+ Page buffers 0
+ Next header page 0
+ Database dialect 3
+ Creation date Nov 27, 2015 11:19:39
+ Attributes force write
+
+ Variable header data:
+ Database backup GUID: {F978F787-7023-4C4A-F79D-8D86645B0487}
+ *END*
+
+
+Database file sequence:
+File /home/fdb/test/FBTEST30.FDB is the only file
+
+Analyzing database pages ...
+AR (140)
+ Primary pointer page: 297, Index root page: 299
+ Total formats: 1, used formats: 1
+ Average record length: 2.79, total records: 120
+ Average version length: 16.61, total versions: 105, max versions: 1
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 120.00, compression ratio: 42.99
+ Pointer pages: 1, data page slots: 3
+ Data pages: 3, average fill: 86%
+ Primary pages: 1, secondary pages: 2, swept pages: 0
+ Empty pages: 0, full pages: 1
+ Blobs: 125, total length: 11237, blob pages: 0
+ Level 0: 125, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 2
+
+COUNTRY (128)
+ Primary pointer page: 182, Index root page: 183
+ Total formats: 1, used formats: 1
+ Average record length: 25.94, total records: 16
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 34.00, compression ratio: 1.31
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 8%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY1 (0)
+ Root page: 186, depth: 1, leaf buckets: 1, nodes: 16
+ Average node length: 10.44, total dup: 0, max dup: 0
+ Average key length: 8.63, compression ratio: 0.80
+ Average prefix length: 0.44, average data length: 6.44
+ Clustering factor: 1, ratio: 0.06
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+CUSTOMER (137)
+ Primary pointer page: 261, Index root page: 262
+ Total formats: 1, used formats: 1
+ Average record length: 125.47, total records: 15
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 241.00, compression ratio: 1.92
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 26%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index CUSTNAMEX (2)
+ Root page: 276, depth: 1, leaf buckets: 1, nodes: 15
+ Average node length: 19.87, total dup: 0, max dup: 0
+ Average key length: 18.27, compression ratio: 0.90
+ Average prefix length: 0.60, average data length: 15.87
+ Clustering factor: 1, ratio: 0.07
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index CUSTREGION (3)
+ Root page: 283, depth: 1, leaf buckets: 1, nodes: 15
+ Average node length: 21.27, total dup: 0, max dup: 0
+ Average key length: 20.20, compression ratio: 0.97
+ Average prefix length: 2.33, average data length: 17.27
+ Clustering factor: 1, ratio: 0.07
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN23 (1)
+ Root page: 264, depth: 1, leaf buckets: 1, nodes: 15
+ Average node length: 8.60, total dup: 4, max dup: 4
+ Average key length: 6.93, compression ratio: 0.83
+ Average prefix length: 0.87, average data length: 4.87
+ Clustering factor: 1, ratio: 0.07
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY22 (0)
+ Root page: 263, depth: 1, leaf buckets: 1, nodes: 15
+ Average node length: 4.20, total dup: 0, max dup: 0
+ Average key length: 3.13, compression ratio: 0.96
+ Average prefix length: 1.87, average data length: 1.13
+ Clustering factor: 1, ratio: 0.07
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+DEPARTMENT (130)
+ Primary pointer page: 198, Index root page: 199
+ Total formats: 1, used formats: 1
+ Average record length: 74.62, total records: 21
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 88.00, compression ratio: 1.18
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 24%
+ Primary pages: 1, secondary pages: 0, swept pages: 1
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index BUDGETX (3)
+ Root page: 284, depth: 1, leaf buckets: 1, nodes: 21
+ Average node length: 9.05, total dup: 7, max dup: 3
+ Average key length: 8.00, compression ratio: 1.13
+ Average prefix length: 3.62, average data length: 5.38
+ Clustering factor: 1, ratio: 0.05
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$4 (0)
+ Root page: 208, depth: 1, leaf buckets: 1, nodes: 21
+ Average node length: 17.95, total dup: 0, max dup: 0
+ Average key length: 16.57, compression ratio: 1.16
+ Average prefix length: 5.29, average data length: 13.95
+ Clustering factor: 1, ratio: 0.05
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN10 (4)
+ Root page: 219, depth: 1, leaf buckets: 1, nodes: 21
+ Average node length: 4.29, total dup: 3, max dup: 3
+ Average key length: 3.24, compression ratio: 0.60
+ Average prefix length: 0.81, average data length: 1.14
+ Clustering factor: 1, ratio: 0.05
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN6 (2)
+ Root page: 210, depth: 1, leaf buckets: 1, nodes: 21
+ Average node length: 4.10, total dup: 13, max dup: 4
+ Average key length: 2.95, compression ratio: 0.97
+ Average prefix length: 2.05, average data length: 0.81
+ Clustering factor: 1, ratio: 0.05
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY5 (1)
+ Root page: 209, depth: 1, leaf buckets: 1, nodes: 21
+ Average node length: 5.24, total dup: 0, max dup: 0
+ Average key length: 4.05, compression ratio: 0.74
+ Average prefix length: 1.29, average data length: 1.71
+ Clustering factor: 1, ratio: 0.05
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+EMPLOYEE (131)
+ Primary pointer page: 212, Index root page: 213
+ Total formats: 1, used formats: 1
+ Average record length: 69.02, total records: 42
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 39.00, compression ratio: 0.57
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 44%
+ Primary pages: 1, secondary pages: 0, swept pages: 1
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index NAMEX (3)
+ Root page: 285, depth: 1, leaf buckets: 1, nodes: 42
+ Average node length: 19.52, total dup: 0, max dup: 0
+ Average key length: 18.50, compression ratio: 0.96
+ Average prefix length: 2.17, average data length: 15.52
+ Clustering factor: 1, ratio: 0.02
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN8 (1)
+ Root page: 215, depth: 1, leaf buckets: 1, nodes: 42
+ Average node length: 4.07, total dup: 23, max dup: 4
+ Average key length: 2.98, compression ratio: 1.01
+ Average prefix length: 2.19, average data length: 0.81
+ Clustering factor: 1, ratio: 0.02
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN9 (2)
+ Root page: 216, depth: 1, leaf buckets: 1, nodes: 42
+ Average node length: 10.43, total dup: 15, max dup: 4
+ Average key length: 9.40, compression ratio: 1.68
+ Average prefix length: 9.05, average data length: 6.79
+ Clustering factor: 1, ratio: 0.02
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY7 (0)
+ Root page: 214, depth: 1, leaf buckets: 1, nodes: 42
+ Average node length: 4.62, total dup: 0, max dup: 0
+ Average key length: 3.60, compression ratio: 0.69
+ Average prefix length: 1.17, average data length: 1.31
+ Clustering factor: 1, ratio: 0.02
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+EMPLOYEE_PROJECT (134)
+ Primary pointer page: 234, Index root page: 235
+ Total formats: 1, used formats: 1
+ Average record length: 12.00, total records: 28
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 11.00, compression ratio: 0.92
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 10%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN15 (1)
+ Root page: 237, depth: 1, leaf buckets: 1, nodes: 28
+ Average node length: 4.29, total dup: 6, max dup: 2
+ Average key length: 3.25, compression ratio: 0.74
+ Average prefix length: 1.36, average data length: 1.04
+ Clustering factor: 1, ratio: 0.04
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN16 (2)
+ Root page: 238, depth: 1, leaf buckets: 1, nodes: 28
+ Average node length: 4.04, total dup: 23, max dup: 9
+ Average key length: 2.89, compression ratio: 1.73
+ Average prefix length: 4.14, average data length: 0.86
+ Clustering factor: 1, ratio: 0.04
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY14 (0)
+ Root page: 236, depth: 1, leaf buckets: 1, nodes: 28
+ Average node length: 13.11, total dup: 0, max dup: 0
+ Average key length: 12.07, compression ratio: 0.99
+ Average prefix length: 2.89, average data length: 9.11
+ Clustering factor: 1, ratio: 0.04
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+JOB (129)
+ Primary pointer page: 189, Index root page: 190
+ Total formats: 1, used formats: 1
+ Average record length: 66.13, total records: 31
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 96.00, compression ratio: 1.45
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 54%
+ Primary pages: 1, secondary pages: 1, swept pages: 1
+ Empty pages: 0, full pages: 0
+ Blobs: 39, total length: 4840, blob pages: 0
+ Level 0: 39, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index MAXSALX (2)
+ Root page: 286, depth: 1, leaf buckets: 1, nodes: 31
+ Average node length: 14.74, total dup: 5, max dup: 1
+ Average key length: 13.71, compression ratio: 1.37
+ Average prefix length: 7.87, average data length: 10.90
+ Clustering factor: 1, ratio: 0.03
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index MINSALX (3)
+ Root page: 287, depth: 1, leaf buckets: 1, nodes: 31
+ Average node length: 14.06, total dup: 7, max dup: 2
+ Average key length: 13.03, compression ratio: 1.44
+ Average prefix length: 8.48, average data length: 10.29
+ Clustering factor: 1, ratio: 0.03
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN3 (1)
+ Root page: 192, depth: 1, leaf buckets: 1, nodes: 31
+ Average node length: 4.61, total dup: 24, max dup: 20
+ Average key length: 3.39, compression ratio: 1.23
+ Average prefix length: 2.77, average data length: 1.39
+ Clustering factor: 1, ratio: 0.03
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY2 (0)
+ Root page: 191, depth: 1, leaf buckets: 1, nodes: 31
+ Average node length: 14.45, total dup: 0, max dup: 0
+ Average key length: 13.42, compression ratio: 1.24
+ Average prefix length: 6.19, average data length: 10.45
+ Clustering factor: 1, ratio: 0.03
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+PROJECT (133)
+ Primary pointer page: 220, Index root page: 221
+ Total formats: 1, used formats: 1
+ Average record length: 49.67, total records: 6
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 56.00, compression ratio: 1.13
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 7%
+ Primary pages: 1, secondary pages: 1, swept pages: 1
+ Empty pages: 0, full pages: 0
+ Blobs: 6, total length: 548, blob pages: 0
+ Level 0: 6, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 2
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index PRODTYPEX (3)
+ Root page: 288, depth: 1, leaf buckets: 1, nodes: 6
+ Average node length: 26.50, total dup: 0, max dup: 0
+ Average key length: 25.33, compression ratio: 1.05
+ Average prefix length: 4.17, average data length: 22.50
+ Clustering factor: 1, ratio: 0.17
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$11 (0)
+ Root page: 222, depth: 1, leaf buckets: 1, nodes: 6
+ Average node length: 17.33, total dup: 0, max dup: 0
+ Average key length: 15.50, compression ratio: 0.88
+ Average prefix length: 0.33, average data length: 13.33
+ Clustering factor: 1, ratio: 0.17
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN13 (2)
+ Root page: 232, depth: 1, leaf buckets: 1, nodes: 6
+ Average node length: 4.67, total dup: 0, max dup: 0
+ Average key length: 3.50, compression ratio: 0.57
+ Average prefix length: 0.67, average data length: 1.33
+ Clustering factor: 1, ratio: 0.17
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY12 (1)
+ Root page: 223, depth: 1, leaf buckets: 1, nodes: 6
+ Average node length: 8.83, total dup: 0, max dup: 0
+ Average key length: 7.00, compression ratio: 0.71
+ Average prefix length: 0.17, average data length: 4.83
+ Clustering factor: 1, ratio: 0.17
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+PROJ_DEPT_BUDGET (135)
+ Primary pointer page: 239, Index root page: 248
+ Total formats: 1, used formats: 1
+ Average record length: 30.58, total records: 24
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 32.00, compression ratio: 1.05
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 20%
+ Primary pages: 1, secondary pages: 1, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Blobs: 24, total length: 1344, blob pages: 0
+ Level 0: 24, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN18 (1)
+ Root page: 250, depth: 1, leaf buckets: 1, nodes: 24
+ Average node length: 3.92, total dup: 15, max dup: 5
+ Average key length: 2.79, compression ratio: 1.07
+ Average prefix length: 2.29, average data length: 0.71
+ Clustering factor: 1, ratio: 0.04
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$FOREIGN19 (2)
+ Root page: 251, depth: 1, leaf buckets: 1, nodes: 24
+ Average node length: 4.21, total dup: 19, max dup: 8
+ Average key length: 3.04, compression ratio: 1.64
+ Average prefix length: 4.00, average data length: 1.00
+ Clustering factor: 1, ratio: 0.04
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$PRIMARY17 (0)
+ Root page: 249, depth: 1, leaf buckets: 1, nodes: 24
+ Average node length: 10.71, total dup: 0, max dup: 0
+ Average key length: 9.67, compression ratio: 1.97
+ Average prefix length: 12.17, average data length: 6.83
+ Clustering factor: 1, ratio: 0.04
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$AUTH_MAPPING (45)
+ Primary pointer page: 72, Index root page: 73
+ Total formats: 0, used formats: 0
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 0
+ Data pages: 0, average fill: 0%
+ Primary pages: 0, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_52 (0)
+ Root page: 146, depth: 1, leaf buckets: 1, nodes: 0
+ Average node length: 0.00, total dup: 0, max dup: 0
+ Average key length: 0.00, compression ratio: 0.00
+ Average prefix length: 0.00, average data length: 0.00
+ Clustering factor: 0, ratio: 0.00
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$BACKUP_HISTORY (32)
+ Primary pointer page: 68, Index root page: 69
+ Total formats: 0, used formats: 0
+ Average record length: 128.63, total records: 8
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 14%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_44 (0)
+ Root page: 138, depth: 1, leaf buckets: 1, nodes: 8
+ Average node length: 5.13, total dup: 0, max dup: 0
+ Average key length: 4.00, compression ratio: 1.97
+ Average prefix length: 6.00, average data length: 1.88
+ Clustering factor: 1, ratio: 0.13
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$CHARACTER_SETS (28)
+ Primary pointer page: 60, Index root page: 61
+ Total formats: 0, used formats: 0
+ Average record length: 53.52, total records: 52
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 45%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_19 (0)
+ Root page: 113, depth: 1, leaf buckets: 1, nodes: 52
+ Average node length: 6.42, total dup: 0, max dup: 0
+ Average key length: 5.15, compression ratio: 1.31
+ Average prefix length: 3.79, average data length: 2.98
+ Clustering factor: 1, ratio: 0.02
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_25 (1)
+ Root page: 119, depth: 1, leaf buckets: 1, nodes: 52
+ Average node length: 4.08, total dup: 0, max dup: 0
+ Average key length: 3.02, compression ratio: 0.75
+ Average prefix length: 1.23, average data length: 1.04
+ Clustering factor: 1, ratio: 0.02
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$CHECK_CONSTRAINTS (24)
+ Primary pointer page: 52, Index root page: 53
+ Total formats: 0, used formats: 0
+ Average record length: 26.22, total records: 68
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 36%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_14 (0)
+ Root page: 106, depth: 1, leaf buckets: 1, nodes: 68
+ Average node length: 3.93, total dup: 14, max dup: 1
+ Average key length: 2.91, compression ratio: 2.71
+ Average prefix length: 6.99, average data length: 0.90
+ Clustering factor: 1, ratio: 0.01
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_40 (1)
+ Root page: 134, depth: 1, leaf buckets: 1, nodes: 68
+ Average node length: 7.37, total dup: 10, max dup: 2
+ Average key length: 6.16, compression ratio: 1.35
+ Average prefix length: 4.43, average data length: 3.91
+ Clustering factor: 1, ratio: 0.01
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$COLLATIONS (29)
+ Primary pointer page: 62, Index root page: 63
+ Total formats: 0, used formats: 0
+ Average record length: 45.36, total records: 150
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 3
+ Data pages: 3, average fill: 41%
+ Primary pages: 2, secondary pages: 1, swept pages: 0
+ Empty pages: 0, full pages: 1
+ Blobs: 12, total length: 307, blob pages: 0
+ Level 0: 12, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 1
+
+ Index RDB$INDEX_20 (0)
+ Root page: 114, depth: 1, leaf buckets: 1, nodes: 150
+ Average node length: 7.58, total dup: 0, max dup: 0
+ Average key length: 6.47, compression ratio: 1.20
+ Average prefix length: 3.95, average data length: 3.79
+ Clustering factor: 24, ratio: 0.16
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_26 (1)
+ Root page: 120, depth: 1, leaf buckets: 1, nodes: 150
+ Average node length: 5.08, total dup: 0, max dup: 0
+ Average key length: 4.07, compression ratio: 2.03
+ Average prefix length: 6.46, average data length: 1.82
+ Clustering factor: 32, ratio: 0.21
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$DATABASE (1)
+ Primary pointer page: 6, Index root page: 7
+ Total formats: 0, used formats: 0
+ Average record length: 25.00, total records: 1
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 1%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$DB_CREATORS (47)
+ Primary pointer page: 74, Index root page: 75
+ Total formats: 0, used formats: 0
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 0
+ Data pages: 0, average fill: 0%
+ Primary pages: 0, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$DEPENDENCIES (13)
+ Primary pointer page: 30, Index root page: 31
+ Total formats: 0, used formats: 0
+ Average record length: 42.32, total records: 168
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 3
+ Data pages: 3, average fill: 43%
+ Primary pages: 3, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 2
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_27 (0)
+ Root page: 121, depth: 1, leaf buckets: 1, nodes: 168
+ Average node length: 4.30, total dup: 120, max dup: 13
+ Average key length: 3.23, compression ratio: 2.98
+ Average prefix length: 8.46, average data length: 1.18
+ Clustering factor: 15, ratio: 0.09
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_28 (1)
+ Root page: 122, depth: 1, leaf buckets: 1, nodes: 168
+ Average node length: 4.15, total dup: 147, max dup: 36
+ Average key length: 3.10, compression ratio: 2.55
+ Average prefix length: 6.85, average data length: 1.04
+ Clustering factor: 22, ratio: 0.13
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$EXCEPTIONS (30)
+ Primary pointer page: 64, Index root page: 65
+ Total formats: 0, used formats: 0
+ Average record length: 101.40, total records: 5
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 7%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_23 (0)
+ Root page: 117, depth: 1, leaf buckets: 1, nodes: 5
+ Average node length: 18.00, total dup: 0, max dup: 0
+ Average key length: 16.20, compression ratio: 0.98
+ Average prefix length: 1.80, average data length: 14.00
+ Clustering factor: 1, ratio: 0.20
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_24 (1)
+ Root page: 118, depth: 1, leaf buckets: 1, nodes: 5
+ Average node length: 4.40, total dup: 0, max dup: 0
+ Average key length: 3.00, compression ratio: 0.60
+ Average prefix length: 0.60, average data length: 1.20
+ Clustering factor: 1, ratio: 0.20
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FIELDS (2)
+ Primary pointer page: 8, Index root page: 9
+ Total formats: 0, used formats: 0
+ Average record length: 55.02, total records: 290
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 5
+ Data pages: 5, average fill: 57%
+ Primary pages: 4, secondary pages: 1, swept pages: 1
+ Empty pages: 0, full pages: 2
+ Blobs: 33, total length: 902, blob pages: 0
+ Level 0: 33, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 2
+ 40 - 59% = 0
+ 60 - 79% = 2
+ 80 - 99% = 1
+
+ Index RDB$INDEX_2 (0)
+ Root page: 94, depth: 1, leaf buckets: 1, nodes: 290
+ Average node length: 8.20, total dup: 0, max dup: 0
+ Average key length: 7.16, compression ratio: 1.54
+ Average prefix length: 6.37, average data length: 4.63
+ Clustering factor: 71, ratio: 0.24
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FIELD_DIMENSIONS (21)
+ Primary pointer page: 46, Index root page: 47
+ Total formats: 0, used formats: 0
+ Average record length: 23.00, total records: 19
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 9%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_36 (0)
+ Root page: 130, depth: 1, leaf buckets: 1, nodes: 19
+ Average node length: 4.47, total dup: 3, max dup: 2
+ Average key length: 3.42, compression ratio: 1.74
+ Average prefix length: 4.68, average data length: 1.26
+ Clustering factor: 1, ratio: 0.05
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FILES (10)
+ Primary pointer page: 24, Index root page: 25
+ Total formats: 0, used formats: 0
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 0
+ Data pages: 0, average fill: 0%
+ Primary pages: 0, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FILTERS (16)
+ Primary pointer page: 36, Index root page: 37
+ Total formats: 0, used formats: 0
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 0
+ Data pages: 0, average fill: 0%
+ Primary pages: 0, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_17 (0)
+ Root page: 111, depth: 1, leaf buckets: 1, nodes: 0
+ Average node length: 0.00, total dup: 0, max dup: 0
+ Average key length: 0.00, compression ratio: 0.00
+ Average prefix length: 0.00, average data length: 0.00
+ Clustering factor: 0, ratio: 0.00
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_45 (1)
+ Root page: 139, depth: 1, leaf buckets: 1, nodes: 0
+ Average node length: 0.00, total dup: 0, max dup: 0
+ Average key length: 0.00, compression ratio: 0.00
+ Average prefix length: 0.00, average data length: 0.00
+ Clustering factor: 0, ratio: 0.00
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FORMATS (8)
+ Primary pointer page: 20, Index root page: 21
+ Total formats: 0, used formats: 0
+ Average record length: 15.76, total records: 17
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 18%
+ Primary pages: 1, secondary pages: 1, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Blobs: 17, total length: 1818, blob pages: 0
+ Level 0: 17, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_16 (0)
+ Root page: 110, depth: 1, leaf buckets: 1, nodes: 17
+ Average node length: 9.29, total dup: 0, max dup: 0
+ Average key length: 8.24, compression ratio: 0.97
+ Average prefix length: 2.71, average data length: 5.29
+ Clustering factor: 1, ratio: 0.06
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FUNCTIONS (14)
+ Primary pointer page: 32, Index root page: 33
+ Total formats: 0, used formats: 0
+ Average record length: 68.33, total records: 6
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 12%
+ Primary pages: 1, secondary pages: 1, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Blobs: 15, total length: 933, blob pages: 0
+ Level 0: 15, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 2
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_53 (1)
+ Root page: 147, depth: 1, leaf buckets: 1, nodes: 6
+ Average node length: 4.67, total dup: 0, max dup: 0
+ Average key length: 3.33, compression ratio: 0.60
+ Average prefix length: 0.67, average data length: 1.33
+ Clustering factor: 1, ratio: 0.17
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_9 (0)
+ Root page: 101, depth: 1, leaf buckets: 1, nodes: 6
+ Average node length: 7.00, total dup: 0, max dup: 0
+ Average key length: 5.67, compression ratio: 1.09
+ Average prefix length: 2.67, average data length: 3.50
+ Clustering factor: 1, ratio: 0.17
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$FUNCTION_ARGUMENTS (15)
+ Primary pointer page: 34, Index root page: 35
+ Total formats: 0, used formats: 0
+ Average record length: 38.58, total records: 12
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 10%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_10 (0)
+ Root page: 102, depth: 1, leaf buckets: 1, nodes: 12
+ Average node length: 5.00, total dup: 6, max dup: 2
+ Average key length: 3.83, compression ratio: 1.61
+ Average prefix length: 4.42, average data length: 1.75
+ Clustering factor: 1, ratio: 0.08
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_49 (1)
+ Root page: 143, depth: 1, leaf buckets: 1, nodes: 12
+ Average node length: 6.33, total dup: 0, max dup: 0
+ Average key length: 5.08, compression ratio: 1.43
+ Average prefix length: 4.33, average data length: 2.92
+ Clustering factor: 1, ratio: 0.08
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_51 (2)
+ Root page: 145, depth: 1, leaf buckets: 1, nodes: 12
+ Average node length: 4.25, total dup: 10, max dup: 10
+ Average key length: 3.17, compression ratio: 0.66
+ Average prefix length: 0.00, average data length: 2.08
+ Clustering factor: 1, ratio: 0.08
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$GENERATORS (20)
+ Primary pointer page: 44, Index root page: 45
+ Total formats: 0, used formats: 0
+ Average record length: 51.80, total records: 15
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 9%
+ Primary pages: 1, secondary pages: 1, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Blobs: 9, total length: 160, blob pages: 0
+ Level 0: 9, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 2
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_11 (0)
+ Root page: 103, depth: 1, leaf buckets: 1, nodes: 15
+ Average node length: 14.27, total dup: 0, max dup: 0
+ Average key length: 13.00, compression ratio: 1.03
+ Average prefix length: 3.00, average data length: 10.33
+ Clustering factor: 1, ratio: 0.07
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_46 (1)
+ Root page: 140, depth: 1, leaf buckets: 1, nodes: 15
+ Average node length: 4.13, total dup: 0, max dup: 0
+ Average key length: 3.00, compression ratio: 0.64
+ Average prefix length: 0.87, average data length: 1.07
+ Clustering factor: 1, ratio: 0.07
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$INDEX_SEGMENTS (3)
+ Primary pointer page: 10, Index root page: 11
+ Total formats: 0, used formats: 0
+ Average record length: 37.32, total records: 122
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 41%
+ Primary pages: 2, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 1
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index RDB$INDEX_6 (0)
+ Root page: 98, depth: 1, leaf buckets: 1, nodes: 122
+ Average node length: 4.68, total dup: 29, max dup: 2
+ Average key length: 3.61, compression ratio: 3.12
+ Average prefix length: 9.70, average data length: 1.54
+ Clustering factor: 12, ratio: 0.10
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$INDICES (4)
+ Primary pointer page: 12, Index root page: 13
+ Total formats: 0, used formats: 0
+ Average record length: 49.41, total records: 93
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 38%
+ Primary pages: 2, secondary pages: 0, swept pages: 1
+ Empty pages: 0, full pages: 1
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index RDB$INDEX_31 (1)
+ Root page: 125, depth: 1, leaf buckets: 1, nodes: 93
+ Average node length: 7.67, total dup: 51, max dup: 5
+ Average key length: 6.58, compression ratio: 2.02
+ Average prefix length: 9.09, average data length: 4.22
+ Clustering factor: 2, ratio: 0.02
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_41 (2)
+ Root page: 135, depth: 1, leaf buckets: 1, nodes: 93
+ Average node length: 2.34, total dup: 86, max dup: 78
+ Average key length: 1.33, compression ratio: 1.38
+ Average prefix length: 1.66, average data length: 0.18
+ Clustering factor: 3, ratio: 0.03
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_5 (0)
+ Root page: 97, depth: 1, leaf buckets: 1, nodes: 93
+ Average node length: 5.20, total dup: 0, max dup: 0
+ Average key length: 4.11, compression ratio: 2.76
+ Average prefix length: 9.30, average data length: 2.02
+ Clustering factor: 5, ratio: 0.05
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$LOG_FILES (25)
+ Primary pointer page: 54, Index root page: 55
+ Total formats: 0, used formats: 0
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 0
+ Data pages: 0, average fill: 0%
+ Primary pages: 0, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$PACKAGES (42)
+ Primary pointer page: 70, Index root page: 71
+ Total formats: 0, used formats: 0
+ Average record length: 50.50, total records: 2
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 6%
+ Primary pages: 1, secondary pages: 1, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Blobs: 6, total length: 584, blob pages: 0
+ Level 0: 6, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 2
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_47 (0)
+ Root page: 141, depth: 1, leaf buckets: 1, nodes: 2
+ Average node length: 6.00, total dup: 0, max dup: 0
+ Average key length: 4.50, compression ratio: 1.00
+ Average prefix length: 2.00, average data length: 2.50
+ Clustering factor: 1, ratio: 0.50
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$PAGES (0)
+ Primary pointer page: 3, Index root page: 4
+ Total formats: 0, used formats: 0
+ Average record length: 14.85, total records: 106
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 41%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 1
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$PROCEDURES (26)
+ Primary pointer page: 56, Index root page: 57
+ Total formats: 0, used formats: 0
+ Average record length: 81.09, total records: 11
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 3
+ Data pages: 3, average fill: 61%
+ Primary pages: 1, secondary pages: 2, swept pages: 0
+ Empty pages: 0, full pages: 1
+ Blobs: 32, total length: 12681, blob pages: 0
+ Level 0: 32, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 1
+
+ Index RDB$INDEX_21 (0)
+ Root page: 115, depth: 1, leaf buckets: 1, nodes: 11
+ Average node length: 16.45, total dup: 0, max dup: 0
+ Average key length: 15.27, compression ratio: 0.90
+ Average prefix length: 1.36, average data length: 12.45
+ Clustering factor: 1, ratio: 0.09
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_22 (1)
+ Root page: 116, depth: 1, leaf buckets: 1, nodes: 11
+ Average node length: 4.18, total dup: 0, max dup: 0
+ Average key length: 3.00, compression ratio: 0.64
+ Average prefix length: 0.82, average data length: 1.09
+ Clustering factor: 1, ratio: 0.09
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$PROCEDURE_PARAMETERS (27)
+ Primary pointer page: 58, Index root page: 59
+ Total formats: 0, used formats: 0
+ Average record length: 43.23, total records: 35
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 26%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_18 (0)
+ Root page: 112, depth: 1, leaf buckets: 1, nodes: 35
+ Average node length: 14.89, total dup: 0, max dup: 0
+ Average key length: 13.83, compression ratio: 1.71
+ Average prefix length: 12.66, average data length: 11.06
+ Clustering factor: 1, ratio: 0.03
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_48 (1)
+ Root page: 142, depth: 1, leaf buckets: 1, nodes: 35
+ Average node length: 4.49, total dup: 0, max dup: 0
+ Average key length: 3.46, compression ratio: 1.75
+ Average prefix length: 4.74, average data length: 1.31
+ Clustering factor: 1, ratio: 0.03
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_50 (2)
+ Root page: 144, depth: 1, leaf buckets: 1, nodes: 35
+ Average node length: 2.00, total dup: 34, max dup: 34
+ Average key length: 1.00, compression ratio: 0.00
+ Average prefix length: 0.00, average data length: 0.00
+ Clustering factor: 1, ratio: 0.03
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$REF_CONSTRAINTS (23)
+ Primary pointer page: 50, Index root page: 51
+ Total formats: 0, used formats: 0
+ Average record length: 54.86, total records: 14
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 12%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_13 (0)
+ Root page: 105, depth: 1, leaf buckets: 1, nodes: 14
+ Average node length: 5.43, total dup: 0, max dup: 0
+ Average key length: 4.36, compression ratio: 1.84
+ Average prefix length: 6.07, average data length: 1.93
+ Clustering factor: 1, ratio: 0.07
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$RELATIONS (6)
+ Primary pointer page: 16, Index root page: 17
+ Total formats: 0, used formats: 0
+ Average record length: 79.91, total records: 67
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 3
+ Data pages: 3, average fill: 63%
+ Primary pages: 1, secondary pages: 2, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Blobs: 28, total length: 6674, blob pages: 0
+ Level 0: 28, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 1
+
+ Index RDB$INDEX_0 (0)
+ Root page: 90, depth: 1, leaf buckets: 1, nodes: 67
+ Average node length: 12.87, total dup: 0, max dup: 0
+ Average key length: 11.72, compression ratio: 1.13
+ Average prefix length: 4.27, average data length: 8.94
+ Clustering factor: 1, ratio: 0.01
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_1 (1)
+ Root page: 93, depth: 1, leaf buckets: 1, nodes: 67
+ Average node length: 4.03, total dup: 0, max dup: 0
+ Average key length: 2.99, compression ratio: 0.78
+ Average prefix length: 1.30, average data length: 1.01
+ Clustering factor: 1, ratio: 0.01
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$RELATION_CONSTRAINTS (22)
+ Primary pointer page: 48, Index root page: 49
+ Total formats: 0, used formats: 0
+ Average record length: 54.02, total records: 108
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 2
+ Data pages: 2, average fill: 47%
+ Primary pages: 2, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 1
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index RDB$INDEX_12 (0)
+ Root page: 104, depth: 1, leaf buckets: 1, nodes: 108
+ Average node length: 4.22, total dup: 0, max dup: 0
+ Average key length: 3.20, compression ratio: 2.77
+ Average prefix length: 7.69, average data length: 1.18
+ Clustering factor: 9, ratio: 0.08
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_42 (1)
+ Root page: 136, depth: 1, leaf buckets: 1, nodes: 108
+ Average node length: 12.20, total dup: 50, max dup: 8
+ Average key length: 11.19, compression ratio: 2.17
+ Average prefix length: 15.64, average data length: 8.67
+ Clustering factor: 6, ratio: 0.06
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_43 (2)
+ Root page: 137, depth: 1, leaf buckets: 1, nodes: 108
+ Average node length: 3.35, total dup: 53, max dup: 53
+ Average key length: 2.34, compression ratio: 2.55
+ Average prefix length: 5.20, average data length: 0.78
+ Clustering factor: 7, ratio: 0.06
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$RELATION_FIELDS (5)
+ Primary pointer page: 14, Index root page: 15
+ Total formats: 0, used formats: 0
+ Average record length: 69.66, total records: 583
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 16
+ Data pages: 16, average fill: 39%
+ Primary pages: 15, secondary pages: 1, swept pages: 1
+ Empty pages: 7, full pages: 7
+ Blobs: 20, total length: 219, blob pages: 0
+ Level 0: 20, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 8
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 7
+ 80 - 99% = 1
+
+ Index RDB$INDEX_15 (2)
+ Root page: 109, depth: 2, leaf buckets: 3, nodes: 583
+ Average node length: 25.07, total dup: 0, max dup: 0
+ Average key length: 24.06, compression ratio: 1.49
+ Average prefix length: 14.84, average data length: 21.08
+ Clustering factor: 301, ratio: 0.52
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 2
+ 60 - 79% = 1
+ 80 - 99% = 0
+
+ Index RDB$INDEX_3 (0)
+ Root page: 95, depth: 1, leaf buckets: 1, nodes: 583
+ Average node length: 5.47, total dup: 341, max dup: 23
+ Average key length: 4.45, compression ratio: 2.90
+ Average prefix length: 10.71, average data length: 2.19
+ Clustering factor: 239, ratio: 0.41
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_4 (1)
+ Root page: 96, depth: 1, leaf buckets: 1, nodes: 583
+ Average node length: 4.13, total dup: 516, max dup: 29
+ Average key length: 3.12, compression ratio: 4.21
+ Average prefix length: 12.09, average data length: 1.03
+ Clustering factor: 51, ratio: 0.09
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$ROLES (31)
+ Primary pointer page: 66, Index root page: 67
+ Total formats: 0, used formats: 0
+ Average record length: 29.00, total records: 2
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 1
+ Data pages: 1, average fill: 1%
+ Primary pages: 1, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_39 (0)
+ Root page: 133, depth: 1, leaf buckets: 1, nodes: 2
+ Average node length: 13.00, total dup: 0, max dup: 0
+ Average key length: 11.00, compression ratio: 0.82
+ Average prefix length: 0.00, average data length: 9.00
+ Clustering factor: 1, ratio: 0.50
+ Fill distribution:
+ 0 - 19% = 1
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$SECURITY_CLASSES (9)
+ Primary pointer page: 22, Index root page: 23
+ Total formats: 0, used formats: 0
+ Average record length: 25.20, total records: 710
+ Average version length: 17.00, total versions: 6, max versions: 3
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 16
+ Data pages: 16, average fill: 54%
+ Primary pages: 10, secondary pages: 6, swept pages: 1
+ Empty pages: 4, full pages: 10
+ Blobs: 713, total length: 15892, blob pages: 0
+ Level 0: 713, Level 1: 0, Level 2: 0
+ Fill distribution:
+ 0 - 19% = 4
+ 20 - 39% = 1
+ 40 - 59% = 1
+ 60 - 79% = 5
+ 80 - 99% = 5
+
+ Index RDB$INDEX_7 (0)
+ Root page: 99, depth: 1, leaf buckets: 1, nodes: 710
+ Average node length: 4.27, total dup: 0, max dup: 0
+ Average key length: 3.15, compression ratio: 2.39
+ Average prefix length: 6.38, average data length: 1.13
+ Clustering factor: 123, ratio: 0.17
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 1
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+RDB$TRANSACTIONS (19)
+ Primary pointer page: 42, Index root page: 43
+ Total formats: 0, used formats: 0
+ Average record length: 0.00, total records: 0
+ Average version length: 0.00, total versions: 0, max versions: 0
+ Average fragment length: 0.00, total fragments: 0, max fragments: 0
+ Average unpacked length: 0.00, compression ratio: 0.00
+ Pointer pages: 1, data page slots: 0
+ Data pages: 0, average fill: 0%
+ Primary pages: 0, secondary pages: 0, swept pages: 0
+ Empty pages: 0, full pages: 0
+ Fill distribution:
+ 0 - 19% = 0
+ 20 - 39% = 0
+ 40 - 59% = 0
+ 60 - 79% = 0
+ 80 - 99% = 0
+
+ Index RDB$INDEX_32 (0)
+ Root page: 126, depth: 1, leaf buckets: 1, nodes: 0
+ Average node length: 0.00, total dup: 0,
max dup: 0 + Average key length: 0.00, compression ratio: 0.00 + Average prefix length: 0.00, average data length: 0.00 + Clustering factor: 0, ratio: 0.00 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$TRIGGERS (12) + Primary pointer page: 28, Index root page: 29 + Total formats: 0, used formats: 0 + Average record length: 62.52, total records: 63 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 0.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 4 + Data pages: 4, average fill: 81% + Primary pages: 1, secondary pages: 3, swept pages: 0 + Empty pages: 0, full pages: 2 + Blobs: 103, total length: 18114, blob pages: 0 + Level 0: 103, Level 1: 0, Level 2: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 2 + 80 - 99% = 2 + + Index RDB$INDEX_38 (1) + Root page: 132, depth: 1, leaf buckets: 1, nodes: 63 + Average node length: 5.84, total dup: 46, max dup: 18 + Average key length: 4.75, compression ratio: 2.52 + Average prefix length: 9.35, average data length: 2.60 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_8 (0) + Root page: 100, depth: 1, leaf buckets: 1, nodes: 63 + Average node length: 5.32, total dup: 0, max dup: 0 + Average key length: 4.24, compression ratio: 2.57 + Average prefix length: 8.71, average data length: 2.19 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$TRIGGER_MESSAGES (17) + Primary pointer page: 38, Index root page: 39 + Total formats: 0, used formats: 0 + Average record length: 60.29, total records: 35 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 0.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 33% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_35 (0) + Root page: 129, depth: 1, leaf buckets: 1, nodes: 35 + Average node length: 4.03, total dup: 12, max dup: 5 + Average key length: 3.00, compression ratio: 4.58 + Average prefix length: 12.74, average data length: 1.00 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$TYPES (11) + Primary pointer page: 26, Index root page: 27 + Total formats: 0, used formats: 0 + Average record length: 44.99, total records: 254 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 0.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 3 + Data pages: 3, average fill: 64% + Primary pages: 3, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 2 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 1 + 60 - 79% = 2 + 80 - 99% = 0 + + Index RDB$INDEX_37 (0) + Root page: 131, depth: 1, leaf buckets: 1, nodes: 254 + Average node length: 8.22, total dup: 15, max dup: 2 + Average key length: 7.14, compression ratio: 1.18 + Average prefix length: 3.85, average data length: 4.57 + Clustering 
factor: 84, ratio: 0.33 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$USER_PRIVILEGES (18) + Primary pointer page: 40, Index root page: 41 + Total formats: 0, used formats: 0 + Average record length: 48.55, total records: 1026 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 0.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 16 + Data pages: 16, average fill: 51% + Primary pages: 16, secondary pages: 0, swept pages: 0 + Empty pages: 4, full pages: 11 + Fill distribution: + 0 - 19% = 5 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 11 + 80 - 99% = 0 + + Index RDB$INDEX_29 (0) + Root page: 123, depth: 1, leaf buckets: 1, nodes: 1026 + Average node length: 6.01, total dup: 448, max dup: 9 + Average key length: 4.78, compression ratio: 2.30 + Average prefix length: 8.54, average data length: 2.45 + Clustering factor: 359, ratio: 0.35 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 0 + + Index RDB$INDEX_30 (1) + Root page: 124, depth: 1, leaf buckets: 1, nodes: 1026 + Average node length: 3.22, total dup: 1024, max dup: 910 + Average key length: 2.01, compression ratio: 2.98 + Average prefix length: 5.99, average data length: 0.01 + Clustering factor: 18, ratio: 0.02 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 1 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$VIEW_RELATIONS (7) + Primary pointer page: 18, Index root page: 19 + Total formats: 0, used formats: 0 + Average record length: 47.00, total records: 2 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 0.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 2% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_33 (0) + Root page: 127, depth: 1, leaf buckets: 1, nodes: 2 + Average node length: 8.50, total dup: 1, max dup: 1 + Average key length: 7.00, compression ratio: 1.43 + Average prefix length: 5.00, average data length: 5.00 + Clustering factor: 1, ratio: 0.50 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_34 (1) + Root page: 128, depth: 1, leaf buckets: 1, nodes: 2 + Average node length: 13.00, total dup: 0, max dup: 0 + Average key length: 11.00, compression ratio: 0.82 + Average prefix length: 0.00, average data length: 9.00 + Clustering factor: 1, ratio: 0.50 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +SALARY_HISTORY (136) + Primary pointer page: 253, Index root page: 254 + Total formats: 1, used formats: 1 + Average record length: 33.29, total records: 49 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 8.00, compression ratio: 0.24 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 30% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index CHANGEX (2) + Root page: 289, depth: 1, leaf buckets: 1, nodes: 49 + 
Average node length: 3.37, total dup: 46, max dup: 21 + Average key length: 2.35, compression ratio: 2.98 + Average prefix length: 6.69, average data length: 0.31 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN21 (1) + Root page: 256, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 4.12, total dup: 16, max dup: 2 + Average key length: 3.10, compression ratio: 0.75 + Average prefix length: 1.43, average data length: 0.90 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY20 (0) + Root page: 255, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 22.29, total dup: 0, max dup: 0 + Average key length: 21.27, compression ratio: 1.06 + Average prefix length: 4.31, average data length: 18.29 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index UPDATERX (3) + Root page: 290, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 3.35, total dup: 46, max dup: 28 + Average key length: 2.29, compression ratio: 2.48 + Average prefix length: 5.39, average data length: 0.29 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +SALES (138) + Primary pointer page: 267, Index root page: 268 + Total formats: 1, used formats: 1 + Average record length: 68.82, total records: 33 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 8.00, compression ratio: 0.12 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 35% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index NEEDX (3) + Root page: 291, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 5.97, total dup: 11, max dup: 6 + Average key length: 4.94, compression ratio: 1.10 + Average prefix length: 2.88, average data length: 2.55 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index QTYX (4) + Root page: 292, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 5.06, total dup: 11, max dup: 3 + Average key length: 4.03, compression ratio: 3.23 + Average prefix length: 11.18, average data length: 1.85 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN25 (1) + Root page: 270, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 3.55, total dup: 18, max dup: 4 + Average key length: 2.52, compression ratio: 1.19 + Average prefix length: 2.48, average data length: 0.52 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN26 (2) + Root page: 271, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 3.67, total dup: 25, max dup: 7 + Average key length: 2.64, compression ratio: 1.01 + Average prefix length: 2.21, average data length: 0.45 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY24 (0) + 
Root page: 269, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 8.45, total dup: 0, max dup: 0 + Average key length: 7.42, compression ratio: 1.08 + Average prefix length: 3.52, average data length: 4.48 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index SALESTATX (5) + Root page: 293, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 4.06, total dup: 27, max dup: 14 + Average key length: 3.03, compression ratio: 3.56 + Average prefix length: 9.82, average data length: 0.97 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T (147) + Primary pointer page: 323, Index root page: 324 + Total formats: 1, used formats: 0 + Average record length: 0.00, total records: 0 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 0.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T2 (142) + Primary pointer page: 302, Index root page: 303 + Total formats: 1, used formats: 1 + Average record length: 0.00, total records: 4 + Average version length: 14.25, total versions: 4, max versions: 1 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 120.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 8% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Blobs: 3, total length: 954, blob pages: 0 + Level 0: 3, Level 1: 0, Level 2: 0 + Fill distribution: + 0 - 19% = 2 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T3 (143) + Primary pointer page: 305, Index root page: 306 + Total formats: 1, used formats: 1 + Average record length: 0.00, total records: 3 + Average version length: 22.67, total versions: 3, max versions: 1 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 112.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 3% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Blobs: 2, total length: 313, blob pages: 0 + Level 0: 2, Level 1: 0, Level 2: 0 + Fill distribution: + 0 - 19% = 2 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T4 (144) + Primary pointer page: 307, Index root page: 308 + Total formats: 1, used formats: 1 + Average record length: 0.00, total records: 2 + Average version length: 75.00, total versions: 2, max versions: 1 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 264.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 3% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T5 (145) + Primary pointer page: 315, Index root page: 316 + Total formats: 1, used formats: 0 + Average record length: 0.00, total records: 0 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 
0 + Average unpacked length: 0.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY28 (0) + Root page: 317, depth: 1, leaf buckets: 1, nodes: 0 + Average node length: 0.00, total dup: 0, max dup: 0 + Average key length: 0.00, compression ratio: 0.00 + Average prefix length: 0.00, average data length: 0.00 + Clustering factor: 0, ratio: 0.00 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +Gstat completion time Wed Apr 04 15:46:31 2018 + diff -Nru fdb-1.6.1+dfsg1/test/gstat30-h.out fdb-2.0.0/test/gstat30-h.out --- fdb-1.6.1+dfsg1/test/gstat30-h.out 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/test/gstat30-h.out 2018-04-26 14:39:03.000000000 +0000 @@ -0,0 +1,29 @@ + +Database "/home/fdb/test/FBTEST30.FDB" +Gstat execution time Wed Apr 04 15:41:34 2018 + +Database header page information: + Flags 0 + Generation 2176 + System Change Number 24 + Page size 8192 + ODS version 12.0 + Oldest transaction 179 + Oldest active 2140 + Oldest snapshot 2140 + Next transaction 2141 + Sequence number 0 + Next attachment ID 1199 + Implementation HW=AMD/Intel/x64 little-endian OS=Linux CC=gcc + Shadow count 0 + Page buffers 0 + Next header page 0 + Database dialect 3 + Creation date Nov 27, 2015 11:19:39 + Attributes force write + + Variable header data: + Database backup GUID: {F978F787-7023-4C4A-F79D-8D86645B0487} + *END* +Gstat completion time Wed Apr 04 15:41:34 2018 + diff -Nru fdb-1.6.1+dfsg1/test/gstat30-i.out fdb-2.0.0/test/gstat30-i.out --- fdb-1.6.1+dfsg1/test/gstat30-i.out 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/test/gstat30-i.out 2018-04-26 14:39:03.000000000 +0000 @@ -0,0 +1,574 @@ + +Database "/home/fdb/test/FBTEST30.FDB" +Gstat execution time Wed Apr 04 15:42:25 2018 + +Database header page information: + Flags 0 + Generation 2178 + System Change Number 24 + Page size 8192 + ODS version 12.0 + Oldest transaction 179 + Oldest active 2143 + Oldest snapshot 2143 + Next transaction 2143 + Sequence number 0 + Next attachment ID 1205 + Implementation HW=AMD/Intel/x64 little-endian OS=Linux CC=gcc + Shadow count 0 + Page buffers 0 + Next header page 0 + Database dialect 3 + Creation date Nov 27, 2015 11:19:39 + Attributes force write + + Variable header data: + Database backup GUID: {F978F787-7023-4C4A-F79D-8D86645B0487} + *END* + + +Database file sequence: +File /home/fdb/test/FBTEST30.FDB is the only file + +Analyzing database pages ... 
+AR (140) + +COUNTRY (128) + + Index RDB$PRIMARY1 (0) + Root page: 186, depth: 1, leaf buckets: 1, nodes: 16 + Average node length: 10.44, total dup: 0, max dup: 0 + Average key length: 8.63, compression ratio: 0.80 + Average prefix length: 0.44, average data length: 6.44 + Clustering factor: 1, ratio: 0.06 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +CUSTOMER (137) + + Index CUSTNAMEX (2) + Root page: 276, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 19.87, total dup: 0, max dup: 0 + Average key length: 18.27, compression ratio: 0.90 + Average prefix length: 0.60, average data length: 15.87 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index CUSTREGION (3) + Root page: 283, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 21.27, total dup: 0, max dup: 0 + Average key length: 20.20, compression ratio: 0.97 + Average prefix length: 2.33, average data length: 17.27 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN23 (1) + Root page: 264, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 8.60, total dup: 4, max dup: 4 + Average key length: 6.93, compression ratio: 0.83 + Average prefix length: 0.87, average data length: 4.87 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY22 (0) + Root page: 263, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 4.20, total dup: 0, max dup: 0 + Average key length: 3.13, compression ratio: 0.96 + Average prefix length: 1.87, average data length: 1.13 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +DEPARTMENT (130) + + Index BUDGETX (3) + Root page: 284, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 9.05, total dup: 7, max dup: 3 + Average key length: 8.00, compression ratio: 1.13 + Average prefix length: 3.62, average data length: 5.38 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$4 (0) + Root page: 208, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 17.95, total dup: 0, max dup: 0 + Average key length: 16.57, compression ratio: 1.16 + Average prefix length: 5.29, average data length: 13.95 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN10 (4) + Root page: 219, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 4.29, total dup: 3, max dup: 3 + Average key length: 3.24, compression ratio: 0.60 + Average prefix length: 0.81, average data length: 1.14 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN6 (2) + Root page: 210, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 4.10, total dup: 13, max dup: 4 + Average key length: 2.95, compression ratio: 0.97 + Average prefix length: 2.05, average data length: 0.81 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY5 (1) + Root page: 209, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 
5.24, total dup: 0, max dup: 0 + Average key length: 4.05, compression ratio: 0.74 + Average prefix length: 1.29, average data length: 1.71 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +EMPLOYEE (131) + + Index NAMEX (3) + Root page: 285, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 19.52, total dup: 0, max dup: 0 + Average key length: 18.50, compression ratio: 0.96 + Average prefix length: 2.17, average data length: 15.52 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN8 (1) + Root page: 215, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 4.07, total dup: 23, max dup: 4 + Average key length: 2.98, compression ratio: 1.01 + Average prefix length: 2.19, average data length: 0.81 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN9 (2) + Root page: 216, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 10.43, total dup: 15, max dup: 4 + Average key length: 9.40, compression ratio: 1.68 + Average prefix length: 9.05, average data length: 6.79 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY7 (0) + Root page: 214, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 4.62, total dup: 0, max dup: 0 + Average key length: 3.60, compression ratio: 0.69 + Average prefix length: 1.17, average data length: 1.31 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +EMPLOYEE_PROJECT (134) + + Index RDB$FOREIGN15 (1) + Root page: 237, depth: 1, leaf buckets: 1, nodes: 28 + Average node length: 4.29, total dup: 6, max dup: 2 + Average key length: 3.25, compression ratio: 0.74 + Average prefix length: 1.36, average data length: 1.04 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN16 (2) + Root page: 238, depth: 1, leaf buckets: 1, nodes: 28 + Average node length: 4.04, total dup: 23, max dup: 9 + Average key length: 2.89, compression ratio: 1.73 + Average prefix length: 4.14, average data length: 0.86 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY14 (0) + Root page: 236, depth: 1, leaf buckets: 1, nodes: 28 + Average node length: 13.11, total dup: 0, max dup: 0 + Average key length: 12.07, compression ratio: 0.99 + Average prefix length: 2.89, average data length: 9.11 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +JOB (129) + + Index MAXSALX (2) + Root page: 286, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 14.74, total dup: 5, max dup: 1 + Average key length: 13.71, compression ratio: 1.37 + Average prefix length: 7.87, average data length: 10.90 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index MINSALX (3) + Root page: 287, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 14.06, total dup: 7, max dup: 2 + Average key length: 13.03, compression ratio: 1.44 + Average prefix length: 8.48, 
average data length: 10.29 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN3 (1) + Root page: 192, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 4.61, total dup: 24, max dup: 20 + Average key length: 3.39, compression ratio: 1.23 + Average prefix length: 2.77, average data length: 1.39 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY2 (0) + Root page: 191, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 14.45, total dup: 0, max dup: 0 + Average key length: 13.42, compression ratio: 1.24 + Average prefix length: 6.19, average data length: 10.45 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +PROJECT (133) + + Index PRODTYPEX (3) + Root page: 288, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 26.50, total dup: 0, max dup: 0 + Average key length: 25.33, compression ratio: 1.05 + Average prefix length: 4.17, average data length: 22.50 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$11 (0) + Root page: 222, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 17.33, total dup: 0, max dup: 0 + Average key length: 15.50, compression ratio: 0.88 + Average prefix length: 0.33, average data length: 13.33 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN13 (2) + Root page: 232, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 4.67, total dup: 0, max dup: 0 + Average key length: 3.50, compression ratio: 0.57 + Average prefix length: 0.67, average data length: 1.33 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY12 (1) + Root page: 223, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 8.83, total dup: 0, max dup: 0 + Average key length: 7.00, compression ratio: 0.71 + Average prefix length: 0.17, average data length: 4.83 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +PROJ_DEPT_BUDGET (135) + + Index RDB$FOREIGN18 (1) + Root page: 250, depth: 1, leaf buckets: 1, nodes: 24 + Average node length: 3.92, total dup: 15, max dup: 5 + Average key length: 2.79, compression ratio: 1.07 + Average prefix length: 2.29, average data length: 0.71 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN19 (2) + Root page: 251, depth: 1, leaf buckets: 1, nodes: 24 + Average node length: 4.21, total dup: 19, max dup: 8 + Average key length: 3.04, compression ratio: 1.64 + Average prefix length: 4.00, average data length: 1.00 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY17 (0) + Root page: 249, depth: 1, leaf buckets: 1, nodes: 24 + Average node length: 10.71, total dup: 0, max dup: 0 + Average key length: 9.67, compression ratio: 1.97 + Average prefix length: 12.17, average data length: 6.83 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 
59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +SALARY_HISTORY (136) + + Index CHANGEX (2) + Root page: 289, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 3.37, total dup: 46, max dup: 21 + Average key length: 2.35, compression ratio: 2.98 + Average prefix length: 6.69, average data length: 0.31 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN21 (1) + Root page: 256, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 4.12, total dup: 16, max dup: 2 + Average key length: 3.10, compression ratio: 0.75 + Average prefix length: 1.43, average data length: 0.90 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY20 (0) + Root page: 255, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 22.29, total dup: 0, max dup: 0 + Average key length: 21.27, compression ratio: 1.06 + Average prefix length: 4.31, average data length: 18.29 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index UPDATERX (3) + Root page: 290, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 3.35, total dup: 46, max dup: 28 + Average key length: 2.29, compression ratio: 2.48 + Average prefix length: 5.39, average data length: 0.29 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +SALES (138) + + Index NEEDX (3) + Root page: 291, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 5.97, total dup: 11, max dup: 6 + Average key length: 4.94, compression ratio: 1.10 + Average prefix length: 2.88, average data length: 2.55 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index QTYX (4) + Root page: 292, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 5.06, total dup: 11, max dup: 3 + Average key length: 4.03, compression ratio: 3.23 + Average prefix length: 11.18, average data length: 1.85 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN25 (1) + Root page: 270, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 3.55, total dup: 18, max dup: 4 + Average key length: 2.52, compression ratio: 1.19 + Average prefix length: 2.48, average data length: 0.52 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN26 (2) + Root page: 271, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 3.67, total dup: 25, max dup: 7 + Average key length: 2.64, compression ratio: 1.01 + Average prefix length: 2.21, average data length: 0.45 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY24 (0) + Root page: 269, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 8.45, total dup: 0, max dup: 0 + Average key length: 7.42, compression ratio: 1.08 + Average prefix length: 3.52, average data length: 4.48 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index SALESTATX (5) + Root page: 293, depth: 1, leaf buckets: 1, nodes: 33 + Average node 
length: 4.06, total dup: 27, max dup: 14 + Average key length: 3.03, compression ratio: 3.56 + Average prefix length: 9.82, average data length: 0.97 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T (147) + +T2 (142) + +T3 (143) + +T4 (144) + +T5 (145) + + Index RDB$PRIMARY28 (0) + Root page: 317, depth: 1, leaf buckets: 1, nodes: 0 + Average node length: 0.00, total dup: 0, max dup: 0 + Average key length: 0.00, compression ratio: 0.00 + Average prefix length: 0.00, average data length: 0.00 + Clustering factor: 0, ratio: 0.00 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +Gstat completion time Wed Apr 04 15:42:25 2018 + diff -Nru fdb-1.6.1+dfsg1/test/gstat30-r.out fdb-2.0.0/test/gstat30-r.out --- fdb-1.6.1+dfsg1/test/gstat30-r.out 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/test/gstat30-r.out 2018-04-26 14:39:03.000000000 +0000 @@ -0,0 +1,842 @@ + +Database "/home/fdb/test/FBTEST30.FDB" +Gstat execution time Wed Apr 04 15:42:35 2018 + +Database header page information: + Flags 0 + Generation 2179 + System Change Number 24 + Page size 8192 + ODS version 12.0 + Oldest transaction 179 + Oldest active 2144 + Oldest snapshot 2144 + Next transaction 2144 + Sequence number 0 + Next attachment ID 1208 + Implementation HW=AMD/Intel/x64 little-endian OS=Linux CC=gcc + Shadow count 0 + Page buffers 0 + Next header page 0 + Database dialect 3 + Creation date Nov 27, 2015 11:19:39 + Attributes force write + + Variable header data: + Database backup GUID: {F978F787-7023-4C4A-F79D-8D86645B0487} + *END* + + +Database file sequence: +File /home/fdb/test/FBTEST30.FDB is the only file + +Analyzing database pages ... +AR (140) + Primary pointer page: 297, Index root page: 299 + Total formats: 1, used formats: 1 + Average record length: 2.79, total records: 120 + Average version length: 16.61, total versions: 105, max versions: 1 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 120.00, compression ratio: 42.99 + Pointer pages: 1, data page slots: 3 + Data pages: 3, average fill: 86% + Primary pages: 1, secondary pages: 2, swept pages: 0 + Empty pages: 0, full pages: 1 + Blobs: 125, total length: 11237, blob pages: 0 + Level 0: 125, Level 1: 0, Level 2: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 2 + +COUNTRY (128) + Primary pointer page: 182, Index root page: 183 + Total formats: 1, used formats: 1 + Average record length: 25.94, total records: 16 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 34.00, compression ratio: 1.31 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 8% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY1 (0) + Root page: 186, depth: 1, leaf buckets: 1, nodes: 16 + Average node length: 10.44, total dup: 0, max dup: 0 + Average key length: 8.63, compression ratio: 0.80 + Average prefix length: 0.44, average data length: 6.44 + Clustering factor: 1, ratio: 0.06 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +CUSTOMER (137) + Primary pointer page: 261, Index root page: 262 + Total formats: 1, used formats: 1 + Average 
record length: 125.47, total records: 15 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 241.00, compression ratio: 1.92 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 26% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index CUSTNAMEX (2) + Root page: 276, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 19.87, total dup: 0, max dup: 0 + Average key length: 18.27, compression ratio: 0.90 + Average prefix length: 0.60, average data length: 15.87 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index CUSTREGION (3) + Root page: 283, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 21.27, total dup: 0, max dup: 0 + Average key length: 20.20, compression ratio: 0.97 + Average prefix length: 2.33, average data length: 17.27 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN23 (1) + Root page: 264, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 8.60, total dup: 4, max dup: 4 + Average key length: 6.93, compression ratio: 0.83 + Average prefix length: 0.87, average data length: 4.87 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY22 (0) + Root page: 263, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 4.20, total dup: 0, max dup: 0 + Average key length: 3.13, compression ratio: 0.96 + Average prefix length: 1.87, average data length: 1.13 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +DEPARTMENT (130) + Primary pointer page: 198, Index root page: 199 + Total formats: 1, used formats: 1 + Average record length: 74.62, total records: 21 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 88.00, compression ratio: 1.18 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 24% + Primary pages: 1, secondary pages: 0, swept pages: 1 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index BUDGETX (3) + Root page: 284, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 9.05, total dup: 7, max dup: 3 + Average key length: 8.00, compression ratio: 1.13 + Average prefix length: 3.62, average data length: 5.38 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$4 (0) + Root page: 208, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 17.95, total dup: 0, max dup: 0 + Average key length: 16.57, compression ratio: 1.16 + Average prefix length: 5.29, average data length: 13.95 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN10 (4) + Root page: 219, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 4.29, total dup: 3, max dup: 3 + Average key length: 3.24, compression ratio: 0.60 + Average 
prefix length: 0.81, average data length: 1.14 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN6 (2) + Root page: 210, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 4.10, total dup: 13, max dup: 4 + Average key length: 2.95, compression ratio: 0.97 + Average prefix length: 2.05, average data length: 0.81 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY5 (1) + Root page: 209, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 5.24, total dup: 0, max dup: 0 + Average key length: 4.05, compression ratio: 0.74 + Average prefix length: 1.29, average data length: 1.71 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +EMPLOYEE (131) + Primary pointer page: 212, Index root page: 213 + Total formats: 1, used formats: 1 + Average record length: 69.02, total records: 42 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 39.00, compression ratio: 0.57 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 44% + Primary pages: 1, secondary pages: 0, swept pages: 1 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 1 + 60 - 79% = 0 + 80 - 99% = 0 + + Index NAMEX (3) + Root page: 285, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 19.52, total dup: 0, max dup: 0 + Average key length: 18.50, compression ratio: 0.96 + Average prefix length: 2.17, average data length: 15.52 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN8 (1) + Root page: 215, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 4.07, total dup: 23, max dup: 4 + Average key length: 2.98, compression ratio: 1.01 + Average prefix length: 2.19, average data length: 0.81 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN9 (2) + Root page: 216, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 10.43, total dup: 15, max dup: 4 + Average key length: 9.40, compression ratio: 1.68 + Average prefix length: 9.05, average data length: 6.79 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY7 (0) + Root page: 214, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 4.62, total dup: 0, max dup: 0 + Average key length: 3.60, compression ratio: 0.69 + Average prefix length: 1.17, average data length: 1.31 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +EMPLOYEE_PROJECT (134) + Primary pointer page: 234, Index root page: 235 + Total formats: 1, used formats: 1 + Average record length: 12.00, total records: 28 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 11.00, compression ratio: 0.92 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 10% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 
0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN15 (1) + Root page: 237, depth: 1, leaf buckets: 1, nodes: 28 + Average node length: 4.29, total dup: 6, max dup: 2 + Average key length: 3.25, compression ratio: 0.74 + Average prefix length: 1.36, average data length: 1.04 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN16 (2) + Root page: 238, depth: 1, leaf buckets: 1, nodes: 28 + Average node length: 4.04, total dup: 23, max dup: 9 + Average key length: 2.89, compression ratio: 1.73 + Average prefix length: 4.14, average data length: 0.86 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY14 (0) + Root page: 236, depth: 1, leaf buckets: 1, nodes: 28 + Average node length: 13.11, total dup: 0, max dup: 0 + Average key length: 12.07, compression ratio: 0.99 + Average prefix length: 2.89, average data length: 9.11 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +JOB (129) + Primary pointer page: 189, Index root page: 190 + Total formats: 1, used formats: 1 + Average record length: 66.13, total records: 31 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 96.00, compression ratio: 1.45 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 54% + Primary pages: 1, secondary pages: 1, swept pages: 1 + Empty pages: 0, full pages: 0 + Blobs: 39, total length: 4840, blob pages: 0 + Level 0: 39, Level 1: 0, Level 2: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 0 + + Index MAXSALX (2) + Root page: 286, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 14.74, total dup: 5, max dup: 1 + Average key length: 13.71, compression ratio: 1.37 + Average prefix length: 7.87, average data length: 10.90 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index MINSALX (3) + Root page: 287, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 14.06, total dup: 7, max dup: 2 + Average key length: 13.03, compression ratio: 1.44 + Average prefix length: 8.48, average data length: 10.29 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN3 (1) + Root page: 192, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 4.61, total dup: 24, max dup: 20 + Average key length: 3.39, compression ratio: 1.23 + Average prefix length: 2.77, average data length: 1.39 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY2 (0) + Root page: 191, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 14.45, total dup: 0, max dup: 0 + Average key length: 13.42, compression ratio: 1.24 + Average prefix length: 6.19, average data length: 10.45 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +PROJECT (133) + Primary pointer page: 220, Index root page: 221 + Total formats: 1, used formats: 1 + Average record length: 
49.67, total records: 6 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 56.00, compression ratio: 1.13 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 7% + Primary pages: 1, secondary pages: 1, swept pages: 1 + Empty pages: 0, full pages: 0 + Blobs: 6, total length: 548, blob pages: 0 + Level 0: 6, Level 1: 0, Level 2: 0 + Fill distribution: + 0 - 19% = 2 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index PRODTYPEX (3) + Root page: 288, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 26.50, total dup: 0, max dup: 0 + Average key length: 25.33, compression ratio: 1.05 + Average prefix length: 4.17, average data length: 22.50 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$11 (0) + Root page: 222, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 17.33, total dup: 0, max dup: 0 + Average key length: 15.50, compression ratio: 0.88 + Average prefix length: 0.33, average data length: 13.33 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN13 (2) + Root page: 232, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 4.67, total dup: 0, max dup: 0 + Average key length: 3.50, compression ratio: 0.57 + Average prefix length: 0.67, average data length: 1.33 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY12 (1) + Root page: 223, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 8.83, total dup: 0, max dup: 0 + Average key length: 7.00, compression ratio: 0.71 + Average prefix length: 0.17, average data length: 4.83 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +PROJ_DEPT_BUDGET (135) + Primary pointer page: 239, Index root page: 248 + Total formats: 1, used formats: 1 + Average record length: 30.58, total records: 24 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 32.00, compression ratio: 1.05 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 20% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Blobs: 24, total length: 1344, blob pages: 0 + Level 0: 24, Level 1: 0, Level 2: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN18 (1) + Root page: 250, depth: 1, leaf buckets: 1, nodes: 24 + Average node length: 3.92, total dup: 15, max dup: 5 + Average key length: 2.79, compression ratio: 1.07 + Average prefix length: 2.29, average data length: 0.71 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN19 (2) + Root page: 251, depth: 1, leaf buckets: 1, nodes: 24 + Average node length: 4.21, total dup: 19, max dup: 8 + Average key length: 3.04, compression ratio: 1.64 + Average prefix length: 4.00, average data length: 1.00 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY17 (0) + Root page: 249, 
depth: 1, leaf buckets: 1, nodes: 24 + Average node length: 10.71, total dup: 0, max dup: 0 + Average key length: 9.67, compression ratio: 1.97 + Average prefix length: 12.17, average data length: 6.83 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +SALARY_HISTORY (136) + Primary pointer page: 253, Index root page: 254 + Total formats: 1, used formats: 1 + Average record length: 33.29, total records: 49 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 8.00, compression ratio: 0.24 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 30% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index CHANGEX (2) + Root page: 289, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 3.37, total dup: 46, max dup: 21 + Average key length: 2.35, compression ratio: 2.98 + Average prefix length: 6.69, average data length: 0.31 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN21 (1) + Root page: 256, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 4.12, total dup: 16, max dup: 2 + Average key length: 3.10, compression ratio: 0.75 + Average prefix length: 1.43, average data length: 0.90 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY20 (0) + Root page: 255, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 22.29, total dup: 0, max dup: 0 + Average key length: 21.27, compression ratio: 1.06 + Average prefix length: 4.31, average data length: 18.29 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index UPDATERX (3) + Root page: 290, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 3.35, total dup: 46, max dup: 28 + Average key length: 2.29, compression ratio: 2.48 + Average prefix length: 5.39, average data length: 0.29 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +SALES (138) + Primary pointer page: 267, Index root page: 268 + Total formats: 1, used formats: 1 + Average record length: 68.82, total records: 33 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 8.00, compression ratio: 0.12 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 35% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index NEEDX (3) + Root page: 291, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 5.97, total dup: 11, max dup: 6 + Average key length: 4.94, compression ratio: 1.10 + Average prefix length: 2.88, average data length: 2.55 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index QTYX (4) + Root page: 292, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 5.06, total dup: 11, max dup: 3 + 
Average key length: 4.03, compression ratio: 3.23 + Average prefix length: 11.18, average data length: 1.85 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN25 (1) + Root page: 270, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 3.55, total dup: 18, max dup: 4 + Average key length: 2.52, compression ratio: 1.19 + Average prefix length: 2.48, average data length: 0.52 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN26 (2) + Root page: 271, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 3.67, total dup: 25, max dup: 7 + Average key length: 2.64, compression ratio: 1.01 + Average prefix length: 2.21, average data length: 0.45 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY24 (0) + Root page: 269, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 8.45, total dup: 0, max dup: 0 + Average key length: 7.42, compression ratio: 1.08 + Average prefix length: 3.52, average data length: 4.48 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index SALESTATX (5) + Root page: 293, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 4.06, total dup: 27, max dup: 14 + Average key length: 3.03, compression ratio: 3.56 + Average prefix length: 9.82, average data length: 0.97 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T (147) + Primary pointer page: 323, Index root page: 324 + Total formats: 1, used formats: 0 + Average record length: 0.00, total records: 0 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 0.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T2 (142) + Primary pointer page: 302, Index root page: 303 + Total formats: 1, used formats: 1 + Average record length: 0.00, total records: 4 + Average version length: 14.25, total versions: 4, max versions: 1 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 120.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 8% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Blobs: 3, total length: 954, blob pages: 0 + Level 0: 3, Level 1: 0, Level 2: 0 + Fill distribution: + 0 - 19% = 2 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T3 (143) + Primary pointer page: 305, Index root page: 306 + Total formats: 1, used formats: 1 + Average record length: 0.00, total records: 3 + Average version length: 22.67, total versions: 3, max versions: 1 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 112.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 3% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Blobs: 2, total length: 313, 
blob pages: 0 + Level 0: 2, Level 1: 0, Level 2: 0 + Fill distribution: + 0 - 19% = 2 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T4 (144) + Primary pointer page: 307, Index root page: 308 + Total formats: 1, used formats: 1 + Average record length: 0.00, total records: 2 + Average version length: 75.00, total versions: 2, max versions: 1 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 264.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 3% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T5 (145) + Primary pointer page: 315, Index root page: 316 + Total formats: 1, used formats: 0 + Average record length: 0.00, total records: 0 + Average version length: 0.00, total versions: 0, max versions: 0 + Average fragment length: 0.00, total fragments: 0, max fragments: 0 + Average unpacked length: 0.00, compression ratio: 0.00 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY28 (0) + Root page: 317, depth: 1, leaf buckets: 1, nodes: 0 + Average node length: 0.00, total dup: 0, max dup: 0 + Average key length: 0.00, compression ratio: 0.00 + Average prefix length: 0.00, average data length: 0.00 + Clustering factor: 0, ratio: 0.00 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +Gstat completion time Wed Apr 04 15:42:35 2018 + diff -Nru fdb-1.6.1+dfsg1/test/gstat30-s.out fdb-2.0.0/test/gstat30-s.out --- fdb-1.6.1+dfsg1/test/gstat30-s.out 1970-01-01 00:00:00.000000000 +0000 +++ fdb-2.0.0/test/gstat30-s.out 2018-04-26 14:39:03.000000000 +0000 @@ -0,0 +1,1920 @@ + +Database "/home/fdb/test/FBTEST30.FDB" +Gstat execution time Wed Apr 04 15:42:47 2018 + +Database header page information: + Flags 0 + Generation 2180 + System Change Number 24 + Page size 8192 + ODS version 12.0 + Oldest transaction 179 + Oldest active 2145 + Oldest snapshot 2145 + Next transaction 2145 + Sequence number 0 + Next attachment ID 1211 + Implementation HW=AMD/Intel/x64 little-endian OS=Linux CC=gcc + Shadow count 0 + Page buffers 0 + Next header page 0 + Database dialect 3 + Creation date Nov 27, 2015 11:19:39 + Attributes force write + + Variable header data: + Database backup GUID: {F978F787-7023-4C4A-F79D-8D86645B0487} + *END* + + +Database file sequence: +File /home/fdb/test/FBTEST30.FDB is the only file + +Analyzing database pages ... 
+AR (140) + Primary pointer page: 297, Index root page: 299 + Pointer pages: 1, data page slots: 3 + Data pages: 3, average fill: 86% + Primary pages: 1, secondary pages: 2, swept pages: 0 + Empty pages: 0, full pages: 1 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 2 + +COUNTRY (128) + Primary pointer page: 182, Index root page: 183 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 8% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY1 (0) + Root page: 186, depth: 1, leaf buckets: 1, nodes: 16 + Average node length: 10.44, total dup: 0, max dup: 0 + Average key length: 8.63, compression ratio: 0.80 + Average prefix length: 0.44, average data length: 6.44 + Clustering factor: 1, ratio: 0.06 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +CUSTOMER (137) + Primary pointer page: 261, Index root page: 262 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 26% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index CUSTNAMEX (2) + Root page: 276, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 19.87, total dup: 0, max dup: 0 + Average key length: 18.27, compression ratio: 0.90 + Average prefix length: 0.60, average data length: 15.87 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index CUSTREGION (3) + Root page: 283, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 21.27, total dup: 0, max dup: 0 + Average key length: 20.20, compression ratio: 0.97 + Average prefix length: 2.33, average data length: 17.27 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN23 (1) + Root page: 264, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 8.60, total dup: 4, max dup: 4 + Average key length: 6.93, compression ratio: 0.83 + Average prefix length: 0.87, average data length: 4.87 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY22 (0) + Root page: 263, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 4.20, total dup: 0, max dup: 0 + Average key length: 3.13, compression ratio: 0.96 + Average prefix length: 1.87, average data length: 1.13 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +DEPARTMENT (130) + Primary pointer page: 198, Index root page: 199 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 24% + Primary pages: 1, secondary pages: 0, swept pages: 1 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index BUDGETX (3) + Root page: 284, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 9.05, total dup: 7, max dup: 3 + Average key length: 8.00, compression ratio: 1.13 + Average prefix length: 3.62, average data length: 5.38 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index 
RDB$4 (0) + Root page: 208, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 17.95, total dup: 0, max dup: 0 + Average key length: 16.57, compression ratio: 1.16 + Average prefix length: 5.29, average data length: 13.95 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN10 (4) + Root page: 219, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 4.29, total dup: 3, max dup: 3 + Average key length: 3.24, compression ratio: 0.60 + Average prefix length: 0.81, average data length: 1.14 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN6 (2) + Root page: 210, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 4.10, total dup: 13, max dup: 4 + Average key length: 2.95, compression ratio: 0.97 + Average prefix length: 2.05, average data length: 0.81 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY5 (1) + Root page: 209, depth: 1, leaf buckets: 1, nodes: 21 + Average node length: 5.24, total dup: 0, max dup: 0 + Average key length: 4.05, compression ratio: 0.74 + Average prefix length: 1.29, average data length: 1.71 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +EMPLOYEE (131) + Primary pointer page: 212, Index root page: 213 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 44% + Primary pages: 1, secondary pages: 0, swept pages: 1 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 1 + 60 - 79% = 0 + 80 - 99% = 0 + + Index NAMEX (3) + Root page: 285, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 19.52, total dup: 0, max dup: 0 + Average key length: 18.50, compression ratio: 0.96 + Average prefix length: 2.17, average data length: 15.52 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN8 (1) + Root page: 215, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 4.07, total dup: 23, max dup: 4 + Average key length: 2.98, compression ratio: 1.01 + Average prefix length: 2.19, average data length: 0.81 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN9 (2) + Root page: 216, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 10.43, total dup: 15, max dup: 4 + Average key length: 9.40, compression ratio: 1.68 + Average prefix length: 9.05, average data length: 6.79 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY7 (0) + Root page: 214, depth: 1, leaf buckets: 1, nodes: 42 + Average node length: 4.62, total dup: 0, max dup: 0 + Average key length: 3.60, compression ratio: 0.69 + Average prefix length: 1.17, average data length: 1.31 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +EMPLOYEE_PROJECT (134) + Primary pointer page: 234, Index root page: 235 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 10% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 
0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN15 (1) + Root page: 237, depth: 1, leaf buckets: 1, nodes: 28 + Average node length: 4.29, total dup: 6, max dup: 2 + Average key length: 3.25, compression ratio: 0.74 + Average prefix length: 1.36, average data length: 1.04 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN16 (2) + Root page: 238, depth: 1, leaf buckets: 1, nodes: 28 + Average node length: 4.04, total dup: 23, max dup: 9 + Average key length: 2.89, compression ratio: 1.73 + Average prefix length: 4.14, average data length: 0.86 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY14 (0) + Root page: 236, depth: 1, leaf buckets: 1, nodes: 28 + Average node length: 13.11, total dup: 0, max dup: 0 + Average key length: 12.07, compression ratio: 0.99 + Average prefix length: 2.89, average data length: 9.11 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +JOB (129) + Primary pointer page: 189, Index root page: 190 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 54% + Primary pages: 1, secondary pages: 1, swept pages: 1 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 0 + + Index MAXSALX (2) + Root page: 286, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 14.74, total dup: 5, max dup: 1 + Average key length: 13.71, compression ratio: 1.37 + Average prefix length: 7.87, average data length: 10.90 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index MINSALX (3) + Root page: 287, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 14.06, total dup: 7, max dup: 2 + Average key length: 13.03, compression ratio: 1.44 + Average prefix length: 8.48, average data length: 10.29 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN3 (1) + Root page: 192, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 4.61, total dup: 24, max dup: 20 + Average key length: 3.39, compression ratio: 1.23 + Average prefix length: 2.77, average data length: 1.39 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY2 (0) + Root page: 191, depth: 1, leaf buckets: 1, nodes: 31 + Average node length: 14.45, total dup: 0, max dup: 0 + Average key length: 13.42, compression ratio: 1.24 + Average prefix length: 6.19, average data length: 10.45 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +PROJECT (133) + Primary pointer page: 220, Index root page: 221 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 7% + Primary pages: 1, secondary pages: 1, swept pages: 1 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 2 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index PRODTYPEX (3) + Root page: 288, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 26.50, total dup: 0, max dup: 0 + Average key length: 25.33, compression 
ratio: 1.05 + Average prefix length: 4.17, average data length: 22.50 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$11 (0) + Root page: 222, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 17.33, total dup: 0, max dup: 0 + Average key length: 15.50, compression ratio: 0.88 + Average prefix length: 0.33, average data length: 13.33 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN13 (2) + Root page: 232, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 4.67, total dup: 0, max dup: 0 + Average key length: 3.50, compression ratio: 0.57 + Average prefix length: 0.67, average data length: 1.33 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY12 (1) + Root page: 223, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 8.83, total dup: 0, max dup: 0 + Average key length: 7.00, compression ratio: 0.71 + Average prefix length: 0.17, average data length: 4.83 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +PROJ_DEPT_BUDGET (135) + Primary pointer page: 239, Index root page: 248 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 20% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN18 (1) + Root page: 250, depth: 1, leaf buckets: 1, nodes: 24 + Average node length: 3.92, total dup: 15, max dup: 5 + Average key length: 2.79, compression ratio: 1.07 + Average prefix length: 2.29, average data length: 0.71 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN19 (2) + Root page: 251, depth: 1, leaf buckets: 1, nodes: 24 + Average node length: 4.21, total dup: 19, max dup: 8 + Average key length: 3.04, compression ratio: 1.64 + Average prefix length: 4.00, average data length: 1.00 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY17 (0) + Root page: 249, depth: 1, leaf buckets: 1, nodes: 24 + Average node length: 10.71, total dup: 0, max dup: 0 + Average key length: 9.67, compression ratio: 1.97 + Average prefix length: 12.17, average data length: 6.83 + Clustering factor: 1, ratio: 0.04 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$AUTH_MAPPING (45) + Primary pointer page: 72, Index root page: 73 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_52 (0) + Root page: 146, depth: 1, leaf buckets: 1, nodes: 0 + Average node length: 0.00, total dup: 0, max dup: 0 + Average key length: 0.00, compression ratio: 0.00 + Average prefix length: 0.00, average data length: 0.00 + Clustering factor: 0, ratio: 0.00 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$BACKUP_HISTORY (32) + Primary pointer page: 68, 
Index root page: 69 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 14% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_44 (0) + Root page: 138, depth: 1, leaf buckets: 1, nodes: 8 + Average node length: 5.13, total dup: 0, max dup: 0 + Average key length: 4.00, compression ratio: 1.97 + Average prefix length: 6.00, average data length: 1.88 + Clustering factor: 1, ratio: 0.13 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$CHARACTER_SETS (28) + Primary pointer page: 60, Index root page: 61 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 45% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 1 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_19 (0) + Root page: 113, depth: 1, leaf buckets: 1, nodes: 52 + Average node length: 6.42, total dup: 0, max dup: 0 + Average key length: 5.15, compression ratio: 1.31 + Average prefix length: 3.79, average data length: 2.98 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_25 (1) + Root page: 119, depth: 1, leaf buckets: 1, nodes: 52 + Average node length: 4.08, total dup: 0, max dup: 0 + Average key length: 3.02, compression ratio: 0.75 + Average prefix length: 1.23, average data length: 1.04 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$CHECK_CONSTRAINTS (24) + Primary pointer page: 52, Index root page: 53 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 36% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_14 (0) + Root page: 106, depth: 1, leaf buckets: 1, nodes: 68 + Average node length: 3.93, total dup: 14, max dup: 1 + Average key length: 2.91, compression ratio: 2.71 + Average prefix length: 6.99, average data length: 0.90 + Clustering factor: 1, ratio: 0.01 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_40 (1) + Root page: 134, depth: 1, leaf buckets: 1, nodes: 68 + Average node length: 7.37, total dup: 10, max dup: 2 + Average key length: 6.16, compression ratio: 1.35 + Average prefix length: 4.43, average data length: 3.91 + Clustering factor: 1, ratio: 0.01 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$COLLATIONS (29) + Primary pointer page: 62, Index root page: 63 + Pointer pages: 1, data page slots: 3 + Data pages: 3, average fill: 41% + Primary pages: 2, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 1 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 1 + + Index RDB$INDEX_20 (0) + Root page: 114, depth: 1, leaf buckets: 1, nodes: 150 + Average node length: 7.58, total dup: 0, max dup: 0 + Average key length: 6.47, compression ratio: 1.20 + Average prefix length: 3.95, average data length: 3.79 + Clustering factor: 24, ratio: 0.16 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_26 (1) 
+ Root page: 120, depth: 1, leaf buckets: 1, nodes: 150 + Average node length: 5.08, total dup: 0, max dup: 0 + Average key length: 4.07, compression ratio: 2.03 + Average prefix length: 6.46, average data length: 1.82 + Clustering factor: 32, ratio: 0.21 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$DATABASE (1) + Primary pointer page: 6, Index root page: 7 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 1% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$DB_CREATORS (47) + Primary pointer page: 74, Index root page: 75 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$DEPENDENCIES (13) + Primary pointer page: 30, Index root page: 31 + Pointer pages: 1, data page slots: 3 + Data pages: 3, average fill: 43% + Primary pages: 3, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 2 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_27 (0) + Root page: 121, depth: 1, leaf buckets: 1, nodes: 168 + Average node length: 4.30, total dup: 120, max dup: 13 + Average key length: 3.23, compression ratio: 2.98 + Average prefix length: 8.46, average data length: 1.18 + Clustering factor: 15, ratio: 0.09 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_28 (1) + Root page: 122, depth: 1, leaf buckets: 1, nodes: 168 + Average node length: 4.15, total dup: 147, max dup: 36 + Average key length: 3.10, compression ratio: 2.55 + Average prefix length: 6.85, average data length: 1.04 + Clustering factor: 22, ratio: 0.13 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$EXCEPTIONS (30) + Primary pointer page: 64, Index root page: 65 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 7% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_23 (0) + Root page: 117, depth: 1, leaf buckets: 1, nodes: 5 + Average node length: 18.00, total dup: 0, max dup: 0 + Average key length: 16.20, compression ratio: 0.98 + Average prefix length: 1.80, average data length: 14.00 + Clustering factor: 1, ratio: 0.20 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_24 (1) + Root page: 118, depth: 1, leaf buckets: 1, nodes: 5 + Average node length: 4.40, total dup: 0, max dup: 0 + Average key length: 3.00, compression ratio: 0.60 + Average prefix length: 0.60, average data length: 1.20 + Clustering factor: 1, ratio: 0.20 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$FIELDS (2) + Primary pointer page: 8, Index root page: 9 + Pointer pages: 1, data page slots: 5 + Data pages: 5, average fill: 57% + Primary pages: 4, secondary pages: 1, swept pages: 1 + Empty pages: 0, full pages: 2 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 2 + 40 - 59% = 0 + 60 - 79% = 2 + 80 - 99% = 1 + + Index RDB$INDEX_2 (0) + Root page: 94, depth: 1, leaf 
buckets: 1, nodes: 290 + Average node length: 8.20, total dup: 0, max dup: 0 + Average key length: 7.16, compression ratio: 1.54 + Average prefix length: 6.37, average data length: 4.63 + Clustering factor: 71, ratio: 0.24 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$FIELD_DIMENSIONS (21) + Primary pointer page: 46, Index root page: 47 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 9% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_36 (0) + Root page: 130, depth: 1, leaf buckets: 1, nodes: 19 + Average node length: 4.47, total dup: 3, max dup: 2 + Average key length: 3.42, compression ratio: 1.74 + Average prefix length: 4.68, average data length: 1.26 + Clustering factor: 1, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$FILES (10) + Primary pointer page: 24, Index root page: 25 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$FILTERS (16) + Primary pointer page: 36, Index root page: 37 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_17 (0) + Root page: 111, depth: 1, leaf buckets: 1, nodes: 0 + Average node length: 0.00, total dup: 0, max dup: 0 + Average key length: 0.00, compression ratio: 0.00 + Average prefix length: 0.00, average data length: 0.00 + Clustering factor: 0, ratio: 0.00 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_45 (1) + Root page: 139, depth: 1, leaf buckets: 1, nodes: 0 + Average node length: 0.00, total dup: 0, max dup: 0 + Average key length: 0.00, compression ratio: 0.00 + Average prefix length: 0.00, average data length: 0.00 + Clustering factor: 0, ratio: 0.00 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$FORMATS (8) + Primary pointer page: 20, Index root page: 21 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 18% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_16 (0) + Root page: 110, depth: 1, leaf buckets: 1, nodes: 17 + Average node length: 9.29, total dup: 0, max dup: 0 + Average key length: 8.24, compression ratio: 0.97 + Average prefix length: 2.71, average data length: 5.29 + Clustering factor: 1, ratio: 0.06 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$FUNCTIONS (14) + Primary pointer page: 32, Index root page: 33 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 12% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 2 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_53 (1) + Root page: 147, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 4.67, 
total dup: 0, max dup: 0 + Average key length: 3.33, compression ratio: 0.60 + Average prefix length: 0.67, average data length: 1.33 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_9 (0) + Root page: 101, depth: 1, leaf buckets: 1, nodes: 6 + Average node length: 7.00, total dup: 0, max dup: 0 + Average key length: 5.67, compression ratio: 1.09 + Average prefix length: 2.67, average data length: 3.50 + Clustering factor: 1, ratio: 0.17 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$FUNCTION_ARGUMENTS (15) + Primary pointer page: 34, Index root page: 35 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 10% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_10 (0) + Root page: 102, depth: 1, leaf buckets: 1, nodes: 12 + Average node length: 5.00, total dup: 6, max dup: 2 + Average key length: 3.83, compression ratio: 1.61 + Average prefix length: 4.42, average data length: 1.75 + Clustering factor: 1, ratio: 0.08 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_49 (1) + Root page: 143, depth: 1, leaf buckets: 1, nodes: 12 + Average node length: 6.33, total dup: 0, max dup: 0 + Average key length: 5.08, compression ratio: 1.43 + Average prefix length: 4.33, average data length: 2.92 + Clustering factor: 1, ratio: 0.08 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_51 (2) + Root page: 145, depth: 1, leaf buckets: 1, nodes: 12 + Average node length: 4.25, total dup: 10, max dup: 10 + Average key length: 3.17, compression ratio: 0.66 + Average prefix length: 0.00, average data length: 2.08 + Clustering factor: 1, ratio: 0.08 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$GENERATORS (20) + Primary pointer page: 44, Index root page: 45 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 9% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 2 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_11 (0) + Root page: 103, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 14.27, total dup: 0, max dup: 0 + Average key length: 13.00, compression ratio: 1.03 + Average prefix length: 3.00, average data length: 10.33 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_46 (1) + Root page: 140, depth: 1, leaf buckets: 1, nodes: 15 + Average node length: 4.13, total dup: 0, max dup: 0 + Average key length: 3.00, compression ratio: 0.64 + Average prefix length: 0.87, average data length: 1.07 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$INDEX_SEGMENTS (3) + Primary pointer page: 10, Index root page: 11 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 41% + Primary pages: 2, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 1 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 0 + + Index RDB$INDEX_6 (0) + Root page: 
98, depth: 1, leaf buckets: 1, nodes: 122 + Average node length: 4.68, total dup: 29, max dup: 2 + Average key length: 3.61, compression ratio: 3.12 + Average prefix length: 9.70, average data length: 1.54 + Clustering factor: 12, ratio: 0.10 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$INDICES (4) + Primary pointer page: 12, Index root page: 13 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 38% + Primary pages: 2, secondary pages: 0, swept pages: 1 + Empty pages: 0, full pages: 1 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 0 + + Index RDB$INDEX_31 (1) + Root page: 125, depth: 1, leaf buckets: 1, nodes: 93 + Average node length: 7.67, total dup: 51, max dup: 5 + Average key length: 6.58, compression ratio: 2.02 + Average prefix length: 9.09, average data length: 4.22 + Clustering factor: 2, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_41 (2) + Root page: 135, depth: 1, leaf buckets: 1, nodes: 93 + Average node length: 2.34, total dup: 86, max dup: 78 + Average key length: 1.33, compression ratio: 1.38 + Average prefix length: 1.66, average data length: 0.18 + Clustering factor: 3, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_5 (0) + Root page: 97, depth: 1, leaf buckets: 1, nodes: 93 + Average node length: 5.20, total dup: 0, max dup: 0 + Average key length: 4.11, compression ratio: 2.76 + Average prefix length: 9.30, average data length: 2.02 + Clustering factor: 5, ratio: 0.05 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$LOG_FILES (25) + Primary pointer page: 54, Index root page: 55 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$PACKAGES (42) + Primary pointer page: 70, Index root page: 71 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 6% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 2 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_47 (0) + Root page: 141, depth: 1, leaf buckets: 1, nodes: 2 + Average node length: 6.00, total dup: 0, max dup: 0 + Average key length: 4.50, compression ratio: 1.00 + Average prefix length: 2.00, average data length: 2.50 + Clustering factor: 1, ratio: 0.50 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$PAGES (0) + Primary pointer page: 3, Index root page: 4 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 41% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 1 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$PROCEDURES (26) + Primary pointer page: 56, Index root page: 57 + Pointer pages: 1, data page slots: 3 + Data pages: 3, average fill: 61% + Primary pages: 1, secondary pages: 2, swept pages: 0 + Empty pages: 0, full pages: 1 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 1 + + Index RDB$INDEX_21 (0) + Root page: 115, depth: 1, leaf buckets: 1, nodes: 11 + Average 
node length: 16.45, total dup: 0, max dup: 0 + Average key length: 15.27, compression ratio: 0.90 + Average prefix length: 1.36, average data length: 12.45 + Clustering factor: 1, ratio: 0.09 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_22 (1) + Root page: 116, depth: 1, leaf buckets: 1, nodes: 11 + Average node length: 4.18, total dup: 0, max dup: 0 + Average key length: 3.00, compression ratio: 0.64 + Average prefix length: 0.82, average data length: 1.09 + Clustering factor: 1, ratio: 0.09 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$PROCEDURE_PARAMETERS (27) + Primary pointer page: 58, Index root page: 59 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 26% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_18 (0) + Root page: 112, depth: 1, leaf buckets: 1, nodes: 35 + Average node length: 14.89, total dup: 0, max dup: 0 + Average key length: 13.83, compression ratio: 1.71 + Average prefix length: 12.66, average data length: 11.06 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_48 (1) + Root page: 142, depth: 1, leaf buckets: 1, nodes: 35 + Average node length: 4.49, total dup: 0, max dup: 0 + Average key length: 3.46, compression ratio: 1.75 + Average prefix length: 4.74, average data length: 1.31 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_50 (2) + Root page: 144, depth: 1, leaf buckets: 1, nodes: 35 + Average node length: 2.00, total dup: 34, max dup: 34 + Average key length: 1.00, compression ratio: 0.00 + Average prefix length: 0.00, average data length: 0.00 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$REF_CONSTRAINTS (23) + Primary pointer page: 50, Index root page: 51 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 12% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_13 (0) + Root page: 105, depth: 1, leaf buckets: 1, nodes: 14 + Average node length: 5.43, total dup: 0, max dup: 0 + Average key length: 4.36, compression ratio: 1.84 + Average prefix length: 6.07, average data length: 1.93 + Clustering factor: 1, ratio: 0.07 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$RELATIONS (6) + Primary pointer page: 16, Index root page: 17 + Pointer pages: 1, data page slots: 3 + Data pages: 3, average fill: 63% + Primary pages: 1, secondary pages: 2, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 1 + + Index RDB$INDEX_0 (0) + Root page: 90, depth: 1, leaf buckets: 1, nodes: 67 + Average node length: 12.87, total dup: 0, max dup: 0 + Average key length: 11.72, compression ratio: 1.13 + Average prefix length: 4.27, average data length: 8.94 + Clustering factor: 1, ratio: 0.01 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index 
RDB$INDEX_1 (1) + Root page: 93, depth: 1, leaf buckets: 1, nodes: 67 + Average node length: 4.03, total dup: 0, max dup: 0 + Average key length: 2.99, compression ratio: 0.78 + Average prefix length: 1.30, average data length: 1.01 + Clustering factor: 1, ratio: 0.01 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$RELATION_CONSTRAINTS (22) + Primary pointer page: 48, Index root page: 49 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 47% + Primary pages: 2, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 1 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 0 + + Index RDB$INDEX_12 (0) + Root page: 104, depth: 1, leaf buckets: 1, nodes: 108 + Average node length: 4.22, total dup: 0, max dup: 0 + Average key length: 3.20, compression ratio: 2.77 + Average prefix length: 7.69, average data length: 1.18 + Clustering factor: 9, ratio: 0.08 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_42 (1) + Root page: 136, depth: 1, leaf buckets: 1, nodes: 108 + Average node length: 12.20, total dup: 50, max dup: 8 + Average key length: 11.19, compression ratio: 2.17 + Average prefix length: 15.64, average data length: 8.67 + Clustering factor: 6, ratio: 0.06 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_43 (2) + Root page: 137, depth: 1, leaf buckets: 1, nodes: 108 + Average node length: 3.35, total dup: 53, max dup: 53 + Average key length: 2.34, compression ratio: 2.55 + Average prefix length: 5.20, average data length: 0.78 + Clustering factor: 7, ratio: 0.06 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$RELATION_FIELDS (5) + Primary pointer page: 14, Index root page: 15 + Pointer pages: 1, data page slots: 16 + Data pages: 16, average fill: 39% + Primary pages: 15, secondary pages: 1, swept pages: 1 + Empty pages: 7, full pages: 7 + Fill distribution: + 0 - 19% = 8 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 7 + 80 - 99% = 1 + + Index RDB$INDEX_15 (2) + Root page: 109, depth: 2, leaf buckets: 3, nodes: 583 + Average node length: 25.07, total dup: 0, max dup: 0 + Average key length: 24.06, compression ratio: 1.49 + Average prefix length: 14.84, average data length: 21.08 + Clustering factor: 301, ratio: 0.52 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 2 + 60 - 79% = 1 + 80 - 99% = 0 + + Index RDB$INDEX_3 (0) + Root page: 95, depth: 1, leaf buckets: 1, nodes: 583 + Average node length: 5.47, total dup: 341, max dup: 23 + Average key length: 4.45, compression ratio: 2.90 + Average prefix length: 10.71, average data length: 2.19 + Clustering factor: 239, ratio: 0.41 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_4 (1) + Root page: 96, depth: 1, leaf buckets: 1, nodes: 583 + Average node length: 4.13, total dup: 516, max dup: 29 + Average key length: 3.12, compression ratio: 4.21 + Average prefix length: 12.09, average data length: 1.03 + Clustering factor: 51, ratio: 0.09 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$ROLES (31) + Primary pointer page: 66, Index root page: 67 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 1% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill 
distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_39 (0) + Root page: 133, depth: 1, leaf buckets: 1, nodes: 2 + Average node length: 13.00, total dup: 0, max dup: 0 + Average key length: 11.00, compression ratio: 0.82 + Average prefix length: 0.00, average data length: 9.00 + Clustering factor: 1, ratio: 0.50 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$SECURITY_CLASSES (9) + Primary pointer page: 22, Index root page: 23 + Pointer pages: 1, data page slots: 16 + Data pages: 16, average fill: 54% + Primary pages: 10, secondary pages: 6, swept pages: 1 + Empty pages: 4, full pages: 10 + Fill distribution: + 0 - 19% = 4 + 20 - 39% = 1 + 40 - 59% = 1 + 60 - 79% = 5 + 80 - 99% = 5 + + Index RDB$INDEX_7 (0) + Root page: 99, depth: 1, leaf buckets: 1, nodes: 710 + Average node length: 4.27, total dup: 0, max dup: 0 + Average key length: 3.15, compression ratio: 2.39 + Average prefix length: 6.38, average data length: 1.13 + Clustering factor: 123, ratio: 0.17 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$TRANSACTIONS (19) + Primary pointer page: 42, Index root page: 43 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_32 (0) + Root page: 126, depth: 1, leaf buckets: 1, nodes: 0 + Average node length: 0.00, total dup: 0, max dup: 0 + Average key length: 0.00, compression ratio: 0.00 + Average prefix length: 0.00, average data length: 0.00 + Clustering factor: 0, ratio: 0.00 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$TRIGGERS (12) + Primary pointer page: 28, Index root page: 29 + Pointer pages: 1, data page slots: 4 + Data pages: 4, average fill: 81% + Primary pages: 1, secondary pages: 3, swept pages: 0 + Empty pages: 0, full pages: 2 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 2 + 80 - 99% = 2 + + Index RDB$INDEX_38 (1) + Root page: 132, depth: 1, leaf buckets: 1, nodes: 63 + Average node length: 5.84, total dup: 46, max dup: 18 + Average key length: 4.75, compression ratio: 2.52 + Average prefix length: 9.35, average data length: 2.60 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_8 (0) + Root page: 100, depth: 1, leaf buckets: 1, nodes: 63 + Average node length: 5.32, total dup: 0, max dup: 0 + Average key length: 4.24, compression ratio: 2.57 + Average prefix length: 8.71, average data length: 2.19 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$TRIGGER_MESSAGES (17) + Primary pointer page: 38, Index root page: 39 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 33% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_35 (0) + Root page: 129, depth: 1, leaf buckets: 1, nodes: 35 + Average node length: 4.03, total dup: 12, max dup: 5 + Average key length: 3.00, compression ratio: 4.58 + Average prefix length: 12.74, average data length: 1.00 + Clustering 
factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$TYPES (11) + Primary pointer page: 26, Index root page: 27 + Pointer pages: 1, data page slots: 3 + Data pages: 3, average fill: 64% + Primary pages: 3, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 2 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 1 + 60 - 79% = 2 + 80 - 99% = 0 + + Index RDB$INDEX_37 (0) + Root page: 131, depth: 1, leaf buckets: 1, nodes: 254 + Average node length: 8.22, total dup: 15, max dup: 2 + Average key length: 7.14, compression ratio: 1.18 + Average prefix length: 3.85, average data length: 4.57 + Clustering factor: 84, ratio: 0.33 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$USER_PRIVILEGES (18) + Primary pointer page: 40, Index root page: 41 + Pointer pages: 1, data page slots: 16 + Data pages: 16, average fill: 51% + Primary pages: 16, secondary pages: 0, swept pages: 0 + Empty pages: 4, full pages: 11 + Fill distribution: + 0 - 19% = 5 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 11 + 80 - 99% = 0 + + Index RDB$INDEX_29 (0) + Root page: 123, depth: 1, leaf buckets: 1, nodes: 1026 + Average node length: 6.01, total dup: 448, max dup: 9 + Average key length: 4.78, compression ratio: 2.30 + Average prefix length: 8.54, average data length: 2.45 + Clustering factor: 359, ratio: 0.35 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 1 + 80 - 99% = 0 + + Index RDB$INDEX_30 (1) + Root page: 124, depth: 1, leaf buckets: 1, nodes: 1026 + Average node length: 3.22, total dup: 1024, max dup: 910 + Average key length: 2.01, compression ratio: 2.98 + Average prefix length: 5.99, average data length: 0.01 + Clustering factor: 18, ratio: 0.02 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 1 + 60 - 79% = 0 + 80 - 99% = 0 + +RDB$VIEW_RELATIONS (7) + Primary pointer page: 18, Index root page: 19 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 2% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_33 (0) + Root page: 127, depth: 1, leaf buckets: 1, nodes: 2 + Average node length: 8.50, total dup: 1, max dup: 1 + Average key length: 7.00, compression ratio: 1.43 + Average prefix length: 5.00, average data length: 5.00 + Clustering factor: 1, ratio: 0.50 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$INDEX_34 (1) + Root page: 128, depth: 1, leaf buckets: 1, nodes: 2 + Average node length: 13.00, total dup: 0, max dup: 0 + Average key length: 11.00, compression ratio: 0.82 + Average prefix length: 0.00, average data length: 9.00 + Clustering factor: 1, ratio: 0.50 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +SALARY_HISTORY (136) + Primary pointer page: 253, Index root page: 254 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 30% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index CHANGEX (2) + Root page: 289, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 3.37, total dup: 46, max dup: 21 + Average key length: 2.35, compression ratio: 2.98 + Average prefix length: 6.69, 
average data length: 0.31 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN21 (1) + Root page: 256, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 4.12, total dup: 16, max dup: 2 + Average key length: 3.10, compression ratio: 0.75 + Average prefix length: 1.43, average data length: 0.90 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY20 (0) + Root page: 255, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 22.29, total dup: 0, max dup: 0 + Average key length: 21.27, compression ratio: 1.06 + Average prefix length: 4.31, average data length: 18.29 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index UPDATERX (3) + Root page: 290, depth: 1, leaf buckets: 1, nodes: 49 + Average node length: 3.35, total dup: 46, max dup: 28 + Average key length: 2.29, compression ratio: 2.48 + Average prefix length: 5.39, average data length: 0.29 + Clustering factor: 1, ratio: 0.02 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +SALES (138) + Primary pointer page: 267, Index root page: 268 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 35% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 1 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index NEEDX (3) + Root page: 291, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 5.97, total dup: 11, max dup: 6 + Average key length: 4.94, compression ratio: 1.10 + Average prefix length: 2.88, average data length: 2.55 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index QTYX (4) + Root page: 292, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 5.06, total dup: 11, max dup: 3 + Average key length: 4.03, compression ratio: 3.23 + Average prefix length: 11.18, average data length: 1.85 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN25 (1) + Root page: 270, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 3.55, total dup: 18, max dup: 4 + Average key length: 2.52, compression ratio: 1.19 + Average prefix length: 2.48, average data length: 0.52 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$FOREIGN26 (2) + Root page: 271, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 3.67, total dup: 25, max dup: 7 + Average key length: 2.64, compression ratio: 1.01 + Average prefix length: 2.21, average data length: 0.45 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY24 (0) + Root page: 269, depth: 1, leaf buckets: 1, nodes: 33 + Average node length: 8.45, total dup: 0, max dup: 0 + Average key length: 7.42, compression ratio: 1.08 + Average prefix length: 3.52, average data length: 4.48 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index SALESTATX (5) + Root page: 293, depth: 1, leaf buckets: 1, 
nodes: 33 + Average node length: 4.06, total dup: 27, max dup: 14 + Average key length: 3.03, compression ratio: 3.56 + Average prefix length: 9.82, average data length: 0.97 + Clustering factor: 1, ratio: 0.03 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T (147) + Primary pointer page: 323, Index root page: 324 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T2 (142) + Primary pointer page: 302, Index root page: 303 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 8% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 2 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T3 (143) + Primary pointer page: 305, Index root page: 306 + Pointer pages: 1, data page slots: 2 + Data pages: 2, average fill: 3% + Primary pages: 1, secondary pages: 1, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 2 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T4 (144) + Primary pointer page: 307, Index root page: 308 + Pointer pages: 1, data page slots: 1 + Data pages: 1, average fill: 3% + Primary pages: 1, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +T5 (145) + Primary pointer page: 315, Index root page: 316 + Pointer pages: 1, data page slots: 0 + Data pages: 0, average fill: 0% + Primary pages: 0, secondary pages: 0, swept pages: 0 + Empty pages: 0, full pages: 0 + Fill distribution: + 0 - 19% = 0 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + + Index RDB$PRIMARY28 (0) + Root page: 317, depth: 1, leaf buckets: 1, nodes: 0 + Average node length: 0.00, total dup: 0, max dup: 0 + Average key length: 0.00, compression ratio: 0.00 + Average prefix length: 0.00, average data length: 0.00 + Clustering factor: 0, ratio: 0.00 + Fill distribution: + 0 - 19% = 1 + 20 - 39% = 0 + 40 - 59% = 0 + 60 - 79% = 0 + 80 - 99% = 0 + +Gstat completion time Wed Apr 04 15:42:47 2018 + diff -Nru fdb-1.6.1+dfsg1/test/testfdb.py fdb-2.0.0/test/testfdb.py --- fdb-1.6.1+dfsg1/test/testfdb.py 2016-11-24 12:03:52.000000000 +0000 +++ fdb-2.0.0/test/testfdb.py 2018-04-26 14:39:03.000000000 +0000 @@ -2,7 +2,7 @@ # # PROGRAM/MODULE: fdb # FILE: testfdb.py -# DESCRIPTION: Python driver for Firebird +# DESCRIPTION: Python driver for Firebird - Unit tests # CREATED: 12.10.2011 # # Software distributed under the License is distributed AS IS, @@ -12,7 +12,7 @@ # # The Original Code was created by Pavel Cisar # -# Copyright (c) 2011 Pavel Cisar +# Copyright (c) Pavel Cisar # and all contributors signed below. # # All Rights Reserved. 
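The gstat30-*.out captures added above are fixtures for the new fdb.gstat parsing module that the reworked test suite imports below (fdb.gstat, StatDatabase, FillDistribution). As a minimal sketch of how one of these captures might be fed to that parser — gstat.parse() as the entry point and the attribute names shown are assumptions inferred from those imports, not a verified API:

    import fdb.gstat as gstat

    # Parse a saved `gstat -h`/`gstat -s` capture such as test/gstat30-s.out.
    # gstat.parse() and the attributes below are assumed from the testfdb.py
    # imports in this diff; check the fdb 2.0 reference before relying on them.
    with open('test/gstat30-s.out') as f:
        db = gstat.parse(f.readlines())      # expected to yield a StatDatabase

    print(db.ods_version, db.page_size)      # header-page fields (12.0, 8192)
    for table in db.tables:                  # one entry per analyzed relation
        print(table.name, table.data_pages)  # e.g. COUNTRY, JOB, SALES ...

Hedged or not, this is the round trip the fixtures exist to exercise: gstat writes the text above, and fdb.gstat turns it back into structured objects the tests can assert against.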
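The SchemaVisitor hunk below also records the visitor-API break in fdb 2.0: visitors now derive from fdb.utils.Visitor, objects are visited via accept() rather than accept_visitor(), per-class hooks are spelled visit_TableColumn instead of visitTableColumn, and anything without a hook falls through to default_action(), which is why the empty visitXxx stubs could be dropped. A hedged sketch of user code migrated to that pattern (the visit_Table hook name and the dispatch-by-class-name rule are inferred from the hunk; the connection parameters are placeholders):

    import fdb
    import fdb.utils

    class TableNames(fdb.utils.Visitor):
        # Collect table names; schema classes without a visit_<Class> hook
        # fall back to default_action(), mirroring SchemaVisitor below.
        def __init__(self):
            self.names = []
        def default_action(self, obj):
            pass                             # ignore everything else
        def visit_Table(self, table):        # 2.0 style; assumed hook name
            self.names.append(table.name)

    con = fdb.connect(dsn='employee', user='sysdba', password='masterkey')
    visitor = TableNames()
    for table in con.schema.tables:
        table.accept(visitor)                # 1.x spelled this accept_visitor()
    print(visitor.names)

The payoff of the rename is visible in the hunk itself: only the four hooks that do real work survive, instead of ten mostly empty visitXxx methods.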
@@ -26,16 +26,25 @@ import fdb import fdb.ibase as ibase import fdb.schema as sm +import fdb.utils as utils +import fdb.gstat as gstat +import fdb.log as log import sys, os import threading import time +import collections from decimal import Decimal from contextlib import closing +from re import finditer +from pprint import pprint +from fdb.gstat import FillDistribution, Encryption, StatDatabase +from fdb.log import LogEntry +from locale import LC_ALL, getlocale, setlocale, getdefaultlocale if ibase.PYTHON_MAJOR_VER == 3: from io import StringIO, BytesIO else: - from StringIO import StringIO + from cStringIO import StringIO BytesIO = StringIO FB20 = '2.0' @@ -51,54 +60,60 @@ # Default user password FBTEST_PASSWORD = 'masterkey' -class SchemaVisitor(fdb.schema.SchemaVisitor): - def __init__(self,test,action,follow='dependencies'): +def linesplit_iter(string): + return (m.group(2) for m in finditer('((.*)\n|(.+)$)', string)) + +def get_object_data(obj, skip=[]): + def add(item): + if item not in skip: + value = getattr(obj, item) + if isinstance(value, collections.Sized) and isinstance(value, (collections.MutableSequence, collections.Mapping)): + value = len(value) + data[item] = value + + data = {} + for item in utils.iter_class_variables(obj): + add(item) + for item in utils.iter_class_properties(obj): + add(item) + return data + +class SchemaVisitor(fdb.utils.Visitor): + def __init__(self, test, action, follow='dependencies'): self.test = test self.seen = [] self.action = action self.follow = follow - def default_action(self,obj): + def default_action(self, obj): if not obj.issystemobject() and self.action in obj.actions: if self.follow == 'dependencies': for dependency in obj.get_dependencies(): d = dependency.depended_on if d and d not in self.seen: - d.accept_visitor(self) + d.accept(self) elif self.follow == 'dependents': for dependency in obj.get_dependents(): d = dependency.dependent if d and d not in self.seen: - d.accept_visitor(self) + d.accept(self) if obj not in self.seen: self.test.printout(obj.get_sql_for(self.action)) self.seen.append(obj) - def visitSchema(self,schema): - pass - def visitMetadataItem(self,item): - pass - def visitTableColumn(self,column): - column.table.accept_visitor(self) - def visitViewColumn(self,column): - column.view.accept_visitor(self) - def visitDependency(self,dependency): - pass - def visitConstraint(self,constraint): - pass - def visitProcedureParameter(self,param): - param.procedure.accept_visitor(self) - def visitFunctionArgument(self,arg): - arg.function.accept_visitor(self) - def visitDatabaseFile(self,dbfile): - pass - def visitShadow(self,shadow): - pass + def visit_TableColumn(self, column): + column.table.accept(self) + def visit_ViewColumn(self, column): + column.view.accept(self) + def visit_ProcedureParameter(self, param): + param.procedure.accept(self) + def visit_FunctionArgument(self, arg): + arg.function.accept(self) class FDBTestBase(unittest.TestCase): def __init__(self, methodName='runTest'): - super(FDBTestBase,self).__init__(methodName) + super(FDBTestBase, self).__init__(methodName) self.output = StringIO() def setUp(self): - with closing(fdb.services.connect(host=FBTEST_HOST,password=FBTEST_PASSWORD)) as svc: + with closing(fdb.services.connect(host=FBTEST_HOST, password=FBTEST_PASSWORD)) as svc: self.version = svc.version if self.version.startswith('2.0'): self.FBTEST_DB = 'fbtest20.fdb' @@ -114,117 +129,133 @@ self.version = FB30 else: raise Exception("Unsupported Firebird version (%s)" % self.version) + # + self.cwd = 
os.getcwd() + self.dbpath = self.cwd if os.path.split(self.cwd)[1] == 'test' \ + else os.path.join(self.cwd, 'test') def clear_output(self): self.output.close() self.output = StringIO() def show_output(self): sys.stdout.write(self.output.getvalue()) sys.stdout.flush() - def printout(self,text='',newline=True): - self.output.write(text) + def printout(self, text='', newline=True, no_rstrip=False): + if no_rstrip: + self.output.write(text) + else: + self.output.write(text.rstrip()) if newline: self.output.write('\n') self.output.flush() - def printData(self,cur): + def printData(self, cur, print_header=True): """Print data from open cursor to stdout.""" - # Print a header. - for fieldDesc in cur.description: - self.printout(fieldDesc[fdb.DESCRIPTION_NAME].ljust(fieldDesc[fdb.DESCRIPTION_DISPLAY_SIZE]),newline=False) - self.printout() - for fieldDesc in cur.description: - self.printout("-" * max((len(fieldDesc[fdb.DESCRIPTION_NAME]),fieldDesc[fdb.DESCRIPTION_DISPLAY_SIZE])),newline=False) - self.printout() + if print_header: + # Print a header. + line = [] + for fieldDesc in cur.description: + line.append(fieldDesc[fdb.DESCRIPTION_NAME].ljust(fieldDesc[fdb.DESCRIPTION_DISPLAY_SIZE])) + self.printout(' '.join(line)) + line = [] + for fieldDesc in cur.description: + line.append("-" * max((len(fieldDesc[fdb.DESCRIPTION_NAME]), fieldDesc[fdb.DESCRIPTION_DISPLAY_SIZE]))) + self.printout(' '.join(line)) # For each row, print the value of each field left-justified within # the maximum possible width of that field. fieldIndices = range(len(cur.description)) for row in cur: + line = [] for fieldIndex in fieldIndices: fieldValue = str(row[fieldIndex]) - fieldMaxWidth = max((len(cur.description[fieldIndex][fdb.DESCRIPTION_NAME]),cur.description[fieldIndex][fdb.DESCRIPTION_DISPLAY_SIZE])) - self.printout(fieldValue.ljust(fieldMaxWidth),newline=False) - self.printout() - + fieldMaxWidth = max((len(cur.description[fieldIndex][fdb.DESCRIPTION_NAME]), cur.description[fieldIndex][fdb.DESCRIPTION_DISPLAY_SIZE])) + line.append(fieldValue.ljust(fieldMaxWidth)) + self.printout(' '.join(line)) class TestCreateDrop(FDBTestBase): def setUp(self): - super(TestCreateDrop,self).setUp() - self.cwd = os.getcwd() - self.dbpath = os.path.join(self.cwd,'test') - self.dbfile = os.path.join(self.dbpath,'droptest.fdb') + super(TestCreateDrop, self).setUp() + self.dbfile = os.path.join(self.dbpath, 'droptest.fdb') if os.path.exists(self.dbfile): os.remove(self.dbfile) def test_create_drop(self): - with closing(fdb.create_database(host=FBTEST_HOST,database=self.dbfile, - user=FBTEST_USER,password=FBTEST_PASSWORD)) as con: - self.assertEqual(con.sql_dialect,3) - self.assertEqual(con.charset,None) + with closing(fdb.create_database(host=FBTEST_HOST, database=self.dbfile, + user=FBTEST_USER, password=FBTEST_PASSWORD)) as con: + self.assertEqual(con.sql_dialect, 3) + self.assertEqual(con.charset, None) con.drop_database() # - with closing(fdb.create_database(host=FBTEST_HOST,database=self.dbfile, - user=FBTEST_USER,password=FBTEST_PASSWORD, - sql_dialect=1,charset='UTF8')) as con: - self.assertEqual(con.sql_dialect,1) - self.assertEqual(con.charset,'UTF8') + with closing(fdb.create_database(host=FBTEST_HOST, port=3050, database=self.dbfile, + user=FBTEST_USER, password=FBTEST_PASSWORD)) as con: + self.assertEqual(con.sql_dialect, 3) + self.assertEqual(con.charset, None) + con.drop_database() + # + with closing(fdb.create_database(host=FBTEST_HOST, database=self.dbfile, + user=FBTEST_USER, password=FBTEST_PASSWORD, + sql_dialect=1, 
charset='UTF8')) as con: + self.assertEqual(con.sql_dialect, 1) + self.assertEqual(con.charset, 'UTF8') con.drop_database() class TestConnection(FDBTestBase): def setUp(self): - super(TestConnection,self).setUp() - self.cwd = os.getcwd() - self.dbpath = os.path.join(self.cwd,'test') - self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB) + super(TestConnection, self).setUp() + self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB) def tearDown(self): pass def test_connect(self): - with fdb.connect(dsn=self.dbfile,user=FBTEST_USER,password=FBTEST_PASSWORD) as con: + with fdb.connect(dsn=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) as con: self.assertIsNotNone(con._db_handle) - dpb = [1,0x1c,len(FBTEST_USER)] + dpb = [1, 0x1c, len(FBTEST_USER)] dpb.extend(ord(x) for x in FBTEST_USER) - dpb.extend((0x1d,len(FBTEST_PASSWORD))) + dpb.extend((0x1d, len(FBTEST_PASSWORD))) dpb.extend(ord(x) for x in FBTEST_PASSWORD) - dpb.extend((ord('?'),1,3)) - self.assertEqual(con._dpb,fdb.bs(dpb)) - with fdb.connect(dsn=self.dbfile,user=FBTEST_USER,password=FBTEST_PASSWORD, - no_gc=1,no_db_triggers=1) as con: - dpb.extend([ibase.isc_dpb_no_garbage_collect,1,1]) - dpb.extend([ibase.isc_dpb_no_db_triggers,1,1]) - self.assertEqual(con._dpb,fdb.bs(dpb)) + dpb.extend((ord('?'), 1, 3)) + self.assertEqual(con._dpb, fdb.bs(dpb)) + with fdb.connect(database=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) as con: + self.assertIsNotNone(con._db_handle) + with fdb.connect(port=3050, database=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) as con: + self.assertIsNotNone(con._db_handle) + with fdb.connect(dsn=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD, + no_gc=1, no_db_triggers=1) as con: + dpb.extend([ibase.isc_dpb_no_garbage_collect, 1, 1]) + dpb.extend([ibase.isc_dpb_no_db_triggers, 1, 1]) + self.assertEqual(con._dpb, fdb.bs(dpb)) def test_properties(self): - with fdb.connect(dsn=self.dbfile,user=FBTEST_USER,password=FBTEST_PASSWORD) as con: - self.assertIn('Firebird',con.server_version) - self.assertIn('Firebird',con.firebird_version) - self.assertIsInstance(con.version,str) - self.assertGreaterEqual(con.engine_version,2.0) - self.assertGreaterEqual(con.ods,11.0) + with fdb.connect(dsn=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) as con: + self.assertIn('Firebird', con.server_version) + self.assertIn('Firebird', con.firebird_version) + self.assertIsInstance(con.version, str) + self.assertGreaterEqual(con.engine_version, 2.0) + self.assertGreaterEqual(con.ods, 11.0) self.assertIsNone(con.group) self.assertIsNone(con.charset) - self.assertEqual(len(con.transactions),2) - self.assertIn(con.main_transaction,con.transactions) - self.assertIn(con.query_transaction,con.transactions) - self.assertEqual(con.default_tpb,fdb.ISOLATION_LEVEL_READ_COMMITED) - self.assertIsInstance(con.schema,sm.Schema) + self.assertEqual(len(con.transactions), 2) + self.assertIn(con.main_transaction, con.transactions) + self.assertIn(con.query_transaction, con.transactions) + self.assertEqual(con.default_tpb, fdb.ISOLATION_LEVEL_READ_COMMITED) + self.assertIsInstance(con.schema, sm.Schema) self.assertFalse(con.closed) def test_connect_role(self): rolename = 'role' - with fdb.connect(dsn=self.dbfile,user=FBTEST_USER, - password=FBTEST_PASSWORD,role=rolename) as con: + with fdb.connect(dsn=self.dbfile, user=FBTEST_USER, + password=FBTEST_PASSWORD, role=rolename) as con: self.assertIsNotNone(con._db_handle) - dpb = [1,0x1c,len(FBTEST_USER)] + dpb = [1, 0x1c, len(FBTEST_USER)] dpb.extend(ord(x) 
for x in FBTEST_USER) - dpb.extend((0x1d,len(FBTEST_PASSWORD))) + dpb.extend((0x1d, len(FBTEST_PASSWORD))) dpb.extend(ord(x) for x in FBTEST_PASSWORD) - dpb.extend((ord('<'),len(rolename))) + dpb.extend((ord('<'), len(rolename))) dpb.extend(ord(x) for x in rolename) - dpb.extend((ord('?'),1,3)) - self.assertEqual(con._dpb,fdb.bs(dpb)) + dpb.extend((ord('?'), 1, 3)) + self.assertEqual(con._dpb, fdb.bs(dpb)) def test_transaction(self): - with fdb.connect(dsn=self.dbfile,user=FBTEST_USER,password=FBTEST_PASSWORD) as con: + with fdb.connect(dsn=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) as con: self.assertIsNotNone(con.main_transaction) self.assertFalse(con.main_transaction.active) self.assertFalse(con.main_transaction.closed) - self.assertEqual(con.main_transaction.default_action,'commit') - self.assertEqual(len(con.main_transaction._connections),1) - self.assertEqual(con.main_transaction._connections[0](),con) + self.assertEqual(con.main_transaction.default_action, 'commit') + self.assertEqual(len(con.main_transaction._connections), 1) + self.assertEqual(con.main_transaction._connections[0](), con) con.begin() self.assertFalse(con.main_transaction.closed) con.commit() @@ -238,9 +269,9 @@ con.rollback(retaining=True) self.assertTrue(con.main_transaction.active) tr = con.trans() - self.assertIsInstance(tr,fdb.Transaction) + self.assertIsInstance(tr, fdb.Transaction) self.assertFalse(con.main_transaction.closed) - self.assertEqual(len(con.transactions),3) + self.assertEqual(len(con.transactions), 3) tr.begin() self.assertFalse(tr.closed) con.begin() @@ -250,86 +281,105 @@ self.assertFalse(tr.active) self.assertTrue(tr.closed) def test_execute_immediate(self): - with fdb.connect(dsn=self.dbfile,user=FBTEST_USER,password=FBTEST_PASSWORD) as con: + with fdb.connect(dsn=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) as con: con.execute_immediate("recreate table t (c1 integer)") con.commit() con.execute_immediate("delete from t") con.commit() def test_database_info(self): - with fdb.connect(dsn=self.dbfile,user=FBTEST_USER,password=FBTEST_PASSWORD) as con: - #x = con.database_info(fdb.fb_info_page_contents,'s',0) - self.assertEqual(con.database_info(fdb.isc_info_db_read_only,'i'),0) + with fdb.connect(dsn=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) as con: + self.assertEqual(con.database_info(fdb.isc_info_db_read_only, 'i'), 0) if con.ods < fdb.ODS_FB_30: - self.assertEqual(con.database_info(fdb.isc_info_page_size,'i'),4096) + self.assertEqual(con.database_info(fdb.isc_info_page_size, 'i'), 4096) else: - self.assertEqual(con.database_info(fdb.isc_info_page_size,'i'),8192) - self.assertEqual(con.database_info(fdb.isc_info_db_sql_dialect,'i'),3) + self.assertEqual(con.database_info(fdb.isc_info_page_size, 'i'), 8192) + self.assertEqual(con.database_info(fdb.isc_info_db_sql_dialect, 'i'), 3) def test_db_info(self): - with fdb.connect(dsn=self.dbfile,user=FBTEST_USER,password=FBTEST_PASSWORD) as con: + with fdb.connect(dsn=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) as con: with con.trans() as t1, con.trans() as t2: - self.assertListEqual(con.db_info(fdb.isc_info_active_transactions),[]) + self.assertListEqual(con.db_info(fdb.isc_info_active_transactions), []) t1.begin() t2.begin() self.assertListEqual(con.db_info(fdb.isc_info_active_transactions), - [t1.transaction_id,t2.transaction_id]) + [t1.transaction_id, t2.transaction_id]) + # + self.assertEqual(len(con.get_page_contents(0)), con.page_size) # - 
self.assertEqual(len(con.get_page_contents(0)),con.page_size) res = con.db_info([fdb.isc_info_page_size, fdb.isc_info_db_read_only, - fdb.isc_info_db_sql_dialect,fdb.isc_info_user_names]) + fdb.isc_info_db_sql_dialect, fdb.isc_info_user_names]) if con.ods < fdb.ODS_FB_30: - self.assertDictEqual(res,{53: {'SYSDBA': 1}, 62: 3, 14: 4096, 63: 0}) + self.assertDictEqual(res, {53: {'SYSDBA': 1}, 62: 3, 14: 4096, 63: 0}) else: - self.assertDictEqual(res,{53: {'SYSDBA': 1}, 62: 3, 14: 8192, 63: 0}) + self.assertDictEqual(res, {53: {'SYSDBA': 1}, 62: 3, 14: 8192, 63: 0}) res = con.db_info(fdb.isc_info_read_seq_count) if con.ods < fdb.ODS_FB_30: - self.assertDictEqual(res,{0: 98, 1: 1}) + self.assertDictEqual(res, {0: 98, 1: 1}) else: - self.assertDictEqual(res,{0: 106, 1: 2}) + self.assertDictEqual(res, {0: 106, 1: 2}) + # + self.assertIsInstance(con.db_info(fdb.isc_info_allocation), int) + self.assertIsInstance(con.db_info(fdb.isc_info_base_level), int) + res = con.db_info(fdb.isc_info_db_id) + self.assertIsInstance(res, tuple) + self.assertEqual(res[0].upper(), self.dbfile.upper()) + res = con.db_info(ibase.isc_info_implementation) + self.assertIsInstance(res, tuple) + self.assertEqual(len(res), 2) + self.assertIsInstance(res[0], int) + self.assertIsInstance(res[1], int) + self.assertNotEqual(fdb.IMPLEMENTATION_NAMES.get(res[0], 'Unknown'), 'Unknown') + self.assertIn('Firebird', con.db_info(fdb.isc_info_version)) + self.assertIn('Firebird', con.db_info(fdb.isc_info_firebird_version)) + self.assertIn(con.db_info(fdb.isc_info_no_reserve), (0, 1)) + self.assertIn(con.db_info(fdb.isc_info_forced_writes), (0, 1)) + self.assertIsInstance(con.db_info(fdb.isc_info_base_level), int) + self.assertIsInstance(con.db_info(fdb.isc_info_ods_version), int) + self.assertIsInstance(con.db_info(fdb.isc_info_ods_minor_version), int) + def test_info_attributes(self): - with fdb.connect(dsn=self.dbfile,user=FBTEST_USER,password=FBTEST_PASSWORD) as con: - self.assertGreater(con.attachment_id,0) - self.assertEqual(con.sql_dialect,3) - self.assertEqual(con.database_sql_dialect,3) - self.assertEqual(con.database_name,self.dbfile) - self.assertIsInstance(con.site_name,str) - self.assertIn(con.implementation_id,fdb.IMPLEMENTATION_NAMES.keys()) - self.assertIn(con.provider_id,fdb.PROVIDER_NAMES.keys()) - self.assertIn(con.db_class_id,fdb.DB_CLASS_NAMES.keys()) - self.assertIsInstance(con.creation_date,datetime.datetime) - self.assertIn(con.page_size,[4096,8192,16384]) - self.assertEqual(con.sweep_interval,20000) + with fdb.connect(dsn=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) as con: + self.assertGreater(con.attachment_id, 0) + self.assertEqual(con.sql_dialect, 3) + self.assertEqual(con.database_sql_dialect, 3) + self.assertEqual(con.database_name.upper(), self.dbfile.upper()) + self.assertIsInstance(con.site_name, str) + self.assertIn(con.implementation_id, fdb.IMPLEMENTATION_NAMES.keys()) + self.assertIn(con.provider_id, fdb.PROVIDER_NAMES.keys()) + self.assertIn(con.db_class_id, fdb.DB_CLASS_NAMES.keys()) + self.assertIsInstance(con.creation_date, datetime.datetime) + self.assertIn(con.page_size, [4096, 8192, 16384]) + self.assertEqual(con.sweep_interval, 20000) self.assertTrue(con.space_reservation) self.assertTrue(con.forced_writes) - self.assertGreater(con.current_memory,0) - self.assertGreater(con.max_memory,0) - self.assertGreater(con.oit,0) - self.assertGreater(con.oat,0) - self.assertGreater(con.ost,0) - self.assertGreater(con.next_transaction,0) + self.assertGreater(con.current_memory, 0) + 
self.assertGreater(con.max_memory, 0) + self.assertGreater(con.oit, 0) + self.assertGreater(con.oat, 0) + self.assertGreater(con.ost, 0) + self.assertGreater(con.next_transaction, 0) self.assertFalse(con.isreadonly()) # io = con.io_stats - self.assertEqual(len(io),4) - self.assertIsInstance(io,dict) + self.assertEqual(len(io), 4) + self.assertIsInstance(io, dict) s = con.get_table_access_stats() - self.assertEqual(len(s),6) - self.assertIsInstance(s[0],fdb.fbcore._TableAccessStats) + self.assertEqual(len(s), 6) + self.assertIsInstance(s[0], fdb.fbcore._TableAccessStats) # with con.trans() as t1, con.trans() as t2: - self.assertListEqual(con.get_active_transaction_ids(),[]) + self.assertListEqual(con.get_active_transaction_ids(), []) t1.begin() t2.begin() self.assertListEqual(con.get_active_transaction_ids(), - [t1.transaction_id,t2.transaction_id]) + [t1.transaction_id, t2.transaction_id]) self.assertEqual(con.get_active_transaction_count(), 2) + class TestTransaction(FDBTestBase): def setUp(self): - super(TestTransaction,self).setUp() - self.cwd = os.getcwd() - self.dbpath = os.path.join(self.cwd,'test') - self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB) - self.con = fdb.connect(host=FBTEST_HOST,database=self.dbfile, - user=FBTEST_USER,password=FBTEST_PASSWORD) + super(TestTransaction, self).setUp() + self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB) + self.con = fdb.connect(host=FBTEST_HOST, database=self.dbfile, + user=FBTEST_USER, password=FBTEST_PASSWORD) #self.con.execute_immediate("recreate table t (c1 integer)") #self.con.commit() def tearDown(self): @@ -344,11 +394,11 @@ tr.commit() cur.execute("select * from t") rows = cur.fetchall() - self.assertListEqual(rows,[(1,)]) + self.assertListEqual(rows, [(1,)]) cur.execute("delete from t") tr.commit() - self.assertEqual(len(tr.cursors),1) - self.assertIs(tr.cursors[0],cur) + self.assertEqual(len(tr.cursors), 1) + self.assertIs(tr.cursors[0], cur) def test_context_manager(self): with fdb.TransactionContext(self.con) as tr: cur = tr.cursor() @@ -356,7 +406,7 @@ cur.execute("select * from t") rows = cur.fetchall() - self.assertListEqual(rows,[(1,)]) + self.assertListEqual(rows, [(1,)]) try: with fdb.TransactionContext(self.con) as tr: @@ -367,14 +417,14 @@ cur.execute("select * from t") rows = cur.fetchall() - self.assertListEqual(rows,[(1,)]) + self.assertListEqual(rows, [(1,)]) with fdb.TransactionContext(self.con) as tr: cur.execute("delete from t") cur.execute("select * from t") rows = cur.fetchall() - self.assertListEqual(rows,[]) + self.assertListEqual(rows, []) def test_savepoint(self): self.con.begin() tr = self.con.main_transaction @@ -386,7 +436,7 @@ cur = tr.cursor() cur.execute("select * from t") rows = cur.fetchall() - self.assertListEqual(rows,[(1,)]) + self.assertListEqual(rows, [(1,)]) def test_fetch_after_commit(self): self.con.execute_immediate("insert into t (c1) values (1)") self.con.commit() @@ -395,7 +445,7 @@ self.con.commit() with self.assertRaises(fdb.DatabaseError) as cm: rows = cur.fetchall() - self.assertTupleEqual(cm.exception.args,('Cannot fetch from this cursor because it has not executed a statement.',)) + self.assertTupleEqual(cm.exception.args, ('Cannot fetch from this cursor because it has not executed a statement.',)) def test_fetch_after_rollback(self): self.con.execute_immediate("insert into t (c1) values (1)") self.con.rollback() @@ -404,56 +454,54 @@ self.con.commit() with self.assertRaises(fdb.DatabaseError) as cm: rows = cur.fetchall() - self.assertTupleEqual(cm.exception.args,('Cannot 
fetch from this cursor because it has not executed a statement.',)) + self.assertTupleEqual(cm.exception.args, ('Cannot fetch from this cursor because it has not executed a statement.',)) def test_tpb(self): tpb = fdb.TPB() tpb.access_mode = fdb.isc_tpb_write tpb.isolation_level = fdb.isc_tpb_read_committed - tpb.isolation_level = (fdb.isc_tpb_read_committed,fdb.isc_tpb_rec_version) + tpb.isolation_level = (fdb.isc_tpb_read_committed, fdb.isc_tpb_rec_version) tpb.lock_resolution = fdb.isc_tpb_wait tpb.lock_timeout = 10 - tpb.table_reservation['COUNTRY'] = (fdb.isc_tpb_protected,fdb.isc_tpb_lock_write) + tpb.table_reservation['COUNTRY'] = (fdb.isc_tpb_protected, fdb.isc_tpb_lock_write) tr = self.con.trans(tpb) tr.begin() tr.commit() def test_transaction_info(self): self.con.begin() tr = self.con.main_transaction - info = tr.transaction_info(ibase.isc_info_tra_isolation,'s') - self.assertEqual(info,'\x08\x02\x00\x03\x01') + info = tr.transaction_info(ibase.isc_info_tra_isolation, 'b') + self.assertEqual(info, ibase.b('\x08\x02\x00\x03\x01')) # - self.assertGreater(tr.transaction_id,0) - self.assertGreater(tr.oit,0) - self.assertGreater(tr.oat,0) - self.assertGreater(tr.ost,0) - self.assertEqual(tr.lock_timeout,-1) - self.assertTupleEqual(tr.isolation,(3,1)) + self.assertGreater(tr.transaction_id, 0) + self.assertGreater(tr.oit, 0) + self.assertGreater(tr.oat, 0) + self.assertGreater(tr.ost, 0) + self.assertEqual(tr.lock_timeout, -1) + self.assertTupleEqual(tr.isolation, (3, 1)) tr.commit() class TestDistributedTransaction(FDBTestBase): def setUp(self): - super(TestDistributedTransaction,self).setUp() - self.cwd = os.getcwd() - self.dbpath = os.path.join(self.cwd,'test') - self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB) - self.db1 = os.path.join(self.dbpath,'fbtest-1.fdb') - self.db2 = os.path.join(self.dbpath,'fbtest-2.fdb') + super(TestDistributedTransaction, self).setUp() + self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB) + self.db1 = os.path.join(self.dbpath, 'fbtest-1.fdb') + self.db2 = os.path.join(self.dbpath, 'fbtest-2.fdb') if not os.path.exists(self.db1): - self.con1 = fdb.create_database(host=FBTEST_HOST,database=self.db1, + self.con1 = fdb.create_database(host=FBTEST_HOST, database=self.db1, user=FBTEST_USER, password=FBTEST_PASSWORD) else: - self.con1 = fdb.connect(host=FBTEST_HOST,database=self.db1, - user=FBTEST_USER,password=FBTEST_PASSWORD) + self.con1 = fdb.connect(host=FBTEST_HOST, database=self.db1, + user=FBTEST_USER, password=FBTEST_PASSWORD) self.con1.execute_immediate("recreate table T (PK integer, C1 integer)") self.con1.commit() if not os.path.exists(self.db2): - self.con2 = fdb.create_database(host=FBTEST_HOST,database=self.db2, + self.con2 = fdb.create_database(host=FBTEST_HOST, database=self.db2, user=FBTEST_USER, password=FBTEST_PASSWORD) else: - self.con2 = fdb.connect(host=FBTEST_HOST,database=self.db2, - user=FBTEST_USER,password=FBTEST_PASSWORD) + self.con2 = fdb.connect(host=FBTEST_HOST, database=self.db2, + user=FBTEST_USER, password=FBTEST_PASSWORD) self.con2.execute_immediate("recreate table T (PK integer, C1 integer)") self.con2.commit() def tearDown(self): @@ -461,17 +509,17 @@ # We can't drop database via connection in group self.con1.group.disband() if not self.con1: - self.con1 = fdb.connect(host=FBTEST_HOST,database=self.db1, - user=FBTEST_USER,password=FBTEST_PASSWORD) + self.con1 = fdb.connect(host=FBTEST_HOST, database=self.db1, + user=FBTEST_USER, password=FBTEST_PASSWORD) self.con1.drop_database() self.con1.close() if not 
self.con2: - self.con2 = fdb.connect(host=FBTEST_HOST,database=self.db2, - user=FBTEST_USER,password=FBTEST_PASSWORD) + self.con2 = fdb.connect(host=FBTEST_HOST, database=self.db2, + user=FBTEST_USER, password=FBTEST_PASSWORD) self.con2.drop_database() self.con2.close() def test_context_manager(self): - cg = fdb.ConnectionGroup((self.con1,self.con2)) + cg = fdb.ConnectionGroup((self.con1, self.con2)) q = 'select * from T order by pk' c1 = cg.cursor(self.con1) @@ -490,11 +538,11 @@ self.con1.commit() cc1.execute(p1) result = cc1.fetchall() - self.assertListEqual(result,[(1, None)]) + self.assertListEqual(result, [(1, None)]) self.con2.commit() cc2.execute(p2) result = cc2.fetchall() - self.assertListEqual(result,[(1, None)]) + self.assertListEqual(result, [(1, None)]) # Distributed transaction: ROLLBACK try: @@ -507,17 +555,17 @@ c1.execute(q) result = c1.fetchall() - self.assertListEqual(result,[(1, None)]) + self.assertListEqual(result, [(1, None)]) c2.execute(q) result = c2.fetchall() - self.assertListEqual(result,[(1, None)]) + self.assertListEqual(result, [(1, None)]) cg.disband() def test_simple_dt(self): - cg = fdb.ConnectionGroup((self.con1,self.con2)) - self.assertEqual(self.con1.group,cg) - self.assertEqual(self.con2.group,cg) + cg = fdb.ConnectionGroup((self.con1, self.con2)) + self.assertEqual(self.con1.group, cg) + self.assertEqual(self.con2.group, cg) q = 'select * from T order by pk' c1 = cg.cursor(self.con1) @@ -536,11 +584,11 @@ self.con1.commit() cc1.execute(p1) result = cc1.fetchall() - self.assertListEqual(result,[(1, None)]) + self.assertListEqual(result, [(1, None)]) self.con2.commit() cc2.execute(p2) result = cc2.fetchall() - self.assertListEqual(result,[(1, None)]) + self.assertListEqual(result, [(1, None)]) # Distributed transaction: PREPARE+COMMIT c1.execute('insert into t (pk) values (2)') @@ -551,11 +599,11 @@ self.con1.commit() cc1.execute(p1) result = cc1.fetchall() - self.assertListEqual(result,[(1, None), (2, None)]) + self.assertListEqual(result, [(1, None), (2, None)]) self.con2.commit() cc2.execute(p2) result = cc2.fetchall() - self.assertListEqual(result,[(1, None), (2, None)]) + self.assertListEqual(result, [(1, None), (2, None)]) # Distributed transaction: SAVEPOINT+ROLLBACK to it c1.execute('insert into t (pk) values (3)') @@ -565,10 +613,10 @@ c1.execute(q) result = c1.fetchall() - self.assertListEqual(result,[(1, None), (2, None), (3, None)]) + self.assertListEqual(result, [(1, None), (2, None), (3, None)]) c2.execute(q) result = c2.fetchall() - self.assertListEqual(result,[(1, None), (2, None)]) + self.assertListEqual(result, [(1, None), (2, None)]) # Distributed transaction: ROLLBACK cg.rollback() @@ -576,11 +624,11 @@ self.con1.commit() cc1.execute(p1) result = cc1.fetchall() - self.assertListEqual(result,[(1, None), (2, None)]) + self.assertListEqual(result, [(1, None), (2, None)]) self.con2.commit() cc2.execute(p2) result = cc2.fetchall() - self.assertListEqual(result,[(1, None), (2, None)]) + self.assertListEqual(result, [(1, None), (2, None)]) # Distributed transaction: EXECUTE_IMMEDIATE cg.execute_immediate('insert into t (pk) values (3)') @@ -589,23 +637,24 @@ self.con1.commit() cc1.execute(p1) result = cc1.fetchall() - self.assertListEqual(result,[(1, None), (2, None), (3, None)]) + self.assertListEqual(result, [(1, None), (2, None), (3, None)]) self.con2.commit() cc2.execute(p2) result = cc2.fetchall() - self.assertListEqual(result,[(1, None), (2, None), (3, None)]) + self.assertListEqual(result, [(1, None), (2, None), (3, None)]) 
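# Illustrative sketch, not part of the upstream diff: the surrounding asserts
# drive two-phase commit across two databases through fdb.ConnectionGroup.
# The outline below restates the group API these tests call (cursor, prepare,
# rollback, execute_immediate, disband), plus commit() as the counterpart of
# the rollback() used above; the DSNs and credentials are placeholders.
import fdb

con_a = fdb.connect(dsn='first.fdb', user='SYSDBA', password='masterkey')
con_b = fdb.connect(dsn='second.fdb', user='SYSDBA', password='masterkey')
group = fdb.ConnectionGroup((con_a, con_b))  # both connections join one distributed transaction
cur = group.cursor(con_a)                    # cursor executes inside the group transaction
cur.execute("insert into t (pk) values (1)")
group.prepare()                              # phase 1: every member prepares
group.commit()                               # phase 2: every member commits
group.disband()                              # detach the connections and dispose of the group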
cg.disband() self.assertIsNone(self.con1.group) self.assertIsNone(self.con2.group) def test_limbo_transactions(self): - cg = fdb.ConnectionGroup((self.con1,self.con2)) - svc = fdb.services.connect(host=FBTEST_HOST,password=FBTEST_PASSWORD) + return + cg = fdb.ConnectionGroup((self.con1, self.con2)) + svc = fdb.services.connect(host=FBTEST_HOST, password=FBTEST_PASSWORD) ids1 = svc.get_limbo_transaction_ids(self.db1) - self.assertEqual(ids1,[]) + self.assertEqual(ids1, []) ids2 = svc.get_limbo_transaction_ids(self.db2) - self.assertEqual(ids2,[]) + self.assertEqual(ids2, []) cg.execute_immediate('insert into t (pk) values (3)') cg.prepare() @@ -625,7 +674,7 @@ with self.assertRaises(fdb.DatabaseError) as cm: cg.disband() self.assertTupleEqual(cm.exception.args, - ('Error while rolling back transaction:\n- SQLCODE: -901\n- invalid transaction handle (expecting explicit transaction start)', -901, 335544332)) + ('Error while rolling back transaction:\n- SQLCODE: -901\n- invalid transaction handle (expecting explicit transaction start)', -901, 335544332)) ids1 = svc.get_limbo_transaction_ids(self.db1) id1 = ids1[0] @@ -634,49 +683,47 @@ # Data could be blocked by limbo transaction if not self.con1: - self.con1 = fdb.connect(dsn=self.db1,user=FBTEST_USER, + self.con1 = fdb.connect(dsn=self.db1, user=FBTEST_USER, password=FBTEST_PASSWORD) if not self.con2: - self.con2 = fdb.connect(dsn=self.db2,user=FBTEST_USER, + self.con2 = fdb.connect(dsn=self.db2, user=FBTEST_USER, password=FBTEST_PASSWORD) c1 = self.con1.cursor() c1.execute('select * from t') with self.assertRaises(fdb.DatabaseError) as cm: row = c1.fetchall() self.assertTupleEqual(cm.exception.args, - ('Cursor.fetchone:\n- SQLCODE: -911\n- record from transaction %i is stuck in limbo' % id1, -911, 335544459)) + ('Cursor.fetchone:\n- SQLCODE: -911\n- record from transaction %i is stuck in limbo' % id1, -911, 335544459)) c2 = self.con2.cursor() c2.execute('select * from t') with self.assertRaises(fdb.DatabaseError) as cm: row = c2.fetchall() self.assertTupleEqual(cm.exception.args, - ('Cursor.fetchone:\n- SQLCODE: -911\n- record from transaction %i is stuck in limbo' % id2, -911, 335544459)) + ('Cursor.fetchone:\n- SQLCODE: -911\n- record from transaction %i is stuck in limbo' % id2, -911, 335544459)) # resolve via service - svc = fdb.services.connect(host=FBTEST_HOST,password=FBTEST_PASSWORD) - svc.commit_limbo_transaction(self.db1,id1) - svc.rollback_limbo_transaction(self.db2,id2) + svc = fdb.services.connect(host=FBTEST_HOST, password=FBTEST_PASSWORD) + svc.commit_limbo_transaction(self.db1, id1) + svc.rollback_limbo_transaction(self.db2, id2) # check the resolution c1 = self.con1.cursor() c1.execute('select * from t') row = c1.fetchall() - self.assertListEqual(row,[(3, None)]) + self.assertListEqual(row, [(3, None)]) c2 = self.con2.cursor() c2.execute('select * from t') row = c2.fetchall() - self.assertListEqual(row,[]) + self.assertListEqual(row, []) svc.close() class TestCursor(FDBTestBase): def setUp(self): - super(TestCursor,self).setUp() - self.cwd = os.getcwd() - self.dbpath = os.path.join(self.cwd,'test') - self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB) - self.con = fdb.connect(host=FBTEST_HOST,database=self.dbfile, - user=FBTEST_USER,password=FBTEST_PASSWORD) + super(TestCursor, self).setUp() + self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB) + self.con = fdb.connect(host=FBTEST_HOST, database=self.dbfile, + user=FBTEST_USER, password=FBTEST_PASSWORD) self.con.execute_immediate("recreate table t (c1 integer 
primary key)") self.con.commit() def tearDown(self): @@ -685,17 +732,20 @@ self.con.close() def test_executemany(self): cur = self.con.cursor() - cur.executemany("insert into t values(?)",[(1,),(2,)]) - cur.executemany("insert into t values(?)",[(3,),(4,)]) + cur.executemany("insert into t values(?)", [(1,), (2,)]) + cur.executemany("insert into t values(?)", [(3,)]) + cur.executemany("insert into t values(?)", [(4,), (5,), (6,)]) self.con.commit() p = cur.prep("insert into t values(?)") - cur.executemany(p,[(5,),(6,)]) - cur.executemany(p,[(7,),(8,)]) + cur.executemany(p, [(7,), (8,)]) + cur.executemany(p, [(9,)]) + cur.executemany(p, [(10,), (11,), (12,)]) self.con.commit() cur.execute("select * from T order by c1") rows = cur.fetchall() - self.assertListEqual(rows,[(1,),(2,),(3,),(4,), - (5,),(6,),(7,),(8,)]) + self.assertListEqual(rows, [(1,), (2,), (3,), (4,), + (5,), (6,), (7,), (8,), + (9,), (10,), (11,), (12,)]) def test_iteration(self): if self.con.ods < fdb.ODS_FB_30: data = [('USA', 'Dollar'), ('England', 'Pound'), ('Canada', 'CdnDlr'), @@ -713,284 +763,284 @@ cur = self.con.cursor() cur.execute('select * from country') rows = [row for row in cur] - self.assertEqual(len(rows),len(data)) - self.assertListEqual(rows,data) + self.assertEqual(len(rows), len(data)) + self.assertListEqual(rows, data) cur.execute('select * from country') rows = [] for row in cur: rows.append(row) - self.assertEqual(len(rows),len(data)) - self.assertListEqual(rows,data) + self.assertEqual(len(rows), len(data)) + self.assertListEqual(rows, data) cur.execute('select * from country') i = 0 for row in cur: i += 1 - self.assertIn(row,data) - self.assertEqual(i,len(data)) + self.assertIn(row, data) + self.assertEqual(i, len(data)) def test_description(self): cur = self.con.cursor() cur.execute('select * from country') - self.assertEqual(len(cur.description),2) - if ibase.PYTHON_MAJOR_VER==3: + self.assertEqual(len(cur.description), 2) + if ibase.PYTHON_MAJOR_VER == 3: self.assertEqual(repr(cur.description), - "(('COUNTRY', , 15, 15, 0, 0, False), " \ - "('CURRENCY', , 10, 10, 0, 0, False))") + "(('COUNTRY', , 15, 15, 0, 0, False), " \ + "('CURRENCY', , 10, 10, 0, 0, False))") else: self.assertEqual(repr(cur.description), - "(('COUNTRY', , 15, 15, 0, 0, False), " \ - "('CURRENCY', , 10, 10, 0, 0, False))") + "(('COUNTRY', , 15, 15, 0, 0, False), " \ + "('CURRENCY', , 10, 10, 0, 0, False))") cur.execute('select country as CT, currency as CUR from country') - self.assertEqual(len(cur.description),2) + self.assertEqual(len(cur.description), 2) cur.execute('select * from customer') - if ibase.PYTHON_MAJOR_VER==3: + if ibase.PYTHON_MAJOR_VER == 3: self.assertEqual(repr(cur.description), - "(('CUST_NO', , 11, 4, 0, 0, False), " \ - "('CUSTOMER', , 25, 25, 0, 0, False), " \ - "('CONTACT_FIRST', , 15, 15, 0, 0, True), " \ - "('CONTACT_LAST', , 20, 20, 0, 0, True), " \ - "('PHONE_NO', , 20, 20, 0, 0, True), " \ - "('ADDRESS_LINE1', , 30, 30, 0, 0, True), " \ - "('ADDRESS_LINE2', , 30, 30, 0, 0, True), " \ - "('CITY', , 25, 25, 0, 0, True), " \ - "('STATE_PROVINCE', , 15, 15, 0, 0, True), " \ - "('COUNTRY', , 15, 15, 0, 0, True), " \ - "('POSTAL_CODE', , 12, 12, 0, 0, True), " \ - "('ON_HOLD', , 1, 1, 0, 0, True))") + "(('CUST_NO', , 11, 4, 0, 0, False), " \ + "('CUSTOMER', , 25, 25, 0, 0, False), " \ + "('CONTACT_FIRST', , 15, 15, 0, 0, True), " \ + "('CONTACT_LAST', , 20, 20, 0, 0, True), " \ + "('PHONE_NO', , 20, 20, 0, 0, True), " \ + "('ADDRESS_LINE1', , 30, 30, 0, 0, True), " \ + "('ADDRESS_LINE2', , 30, 30, 0, 0, 
True), " \ + "('CITY', , 25, 25, 0, 0, True), " \ + "('STATE_PROVINCE', , 15, 15, 0, 0, True), " \ + "('COUNTRY', , 15, 15, 0, 0, True), " \ + "('POSTAL_CODE', , 12, 12, 0, 0, True), " \ + "('ON_HOLD', , 1, 1, 0, 0, True))") else: self.assertEqual(repr(cur.description), - "(('CUST_NO', , 11, 4, 0, 0, False), " \ - "('CUSTOMER', , 25, 25, 0, 0, False), " \ - "('CONTACT_FIRST', , 15, 15, 0, 0, True), " \ - "('CONTACT_LAST', , 20, 20, 0, 0, True), " \ - "('PHONE_NO', , 20, 20, 0, 0, True), " \ - "('ADDRESS_LINE1', , 30, 30, 0, 0, True), " \ - "('ADDRESS_LINE2', , 30, 30, 0, 0, True), " \ - "('CITY', , 25, 25, 0, 0, True), " \ - "('STATE_PROVINCE', , 15, 15, 0, 0, True), " \ - "('COUNTRY', , 15, 15, 0, 0, True), " \ - "('POSTAL_CODE', , 12, 12, 0, 0, True), " \ - "('ON_HOLD', , 1, 1, 0, 0, True))") + "(('CUST_NO', , 11, 4, 0, 0, False), " \ + "('CUSTOMER', , 25, 25, 0, 0, False), " \ + "('CONTACT_FIRST', , 15, 15, 0, 0, True), " \ + "('CONTACT_LAST', , 20, 20, 0, 0, True), " \ + "('PHONE_NO', , 20, 20, 0, 0, True), " \ + "('ADDRESS_LINE1', , 30, 30, 0, 0, True), " \ + "('ADDRESS_LINE2', , 30, 30, 0, 0, True), " \ + "('CITY', , 25, 25, 0, 0, True), " \ + "('STATE_PROVINCE', , 15, 15, 0, 0, True), " \ + "('COUNTRY', , 15, 15, 0, 0, True), " \ + "('POSTAL_CODE', , 12, 12, 0, 0, True), " \ + "('ON_HOLD', , 1, 1, 0, 0, True))") cur.execute('select * from job') - if ibase.PYTHON_MAJOR_VER==3: + if ibase.PYTHON_MAJOR_VER == 3: self.assertEqual(repr(cur.description), - "(('JOB_CODE', , 5, 5, 0, 0, False), " \ - "('JOB_GRADE', , 6, 2, 0, 0, False), " \ - "('JOB_COUNTRY', , 15, 15, 0, 0, False), " \ - "('JOB_TITLE', , 25, 25, 0, 0, False), " \ - "('MIN_SALARY', , 20, 8, 10, -2, False), " \ - "('MAX_SALARY', , 20, 8, 10, -2, False), " \ - "('JOB_REQUIREMENT', , 0, 8, 0, 1, True), " \ - "('LANGUAGE_REQ', , -1, 8, 0, 0, True))") + "(('JOB_CODE', , 5, 5, 0, 0, False), " \ + "('JOB_GRADE', , 6, 2, 0, 0, False), " \ + "('JOB_COUNTRY', , 15, 15, 0, 0, False), " \ + "('JOB_TITLE', , 25, 25, 0, 0, False), " \ + "('MIN_SALARY', , 20, 8, 10, -2, False), " \ + "('MAX_SALARY', , 20, 8, 10, -2, False), " \ + "('JOB_REQUIREMENT', , 0, 8, 0, 1, True), " \ + "('LANGUAGE_REQ', , -1, 8, 0, 0, True))") else: self.assertEqual(repr(cur.description), - "(('JOB_CODE', , 5, 5, 0, 0, False), " \ - "('JOB_GRADE', , 6, 2, 0, 0, False), " \ - "('JOB_COUNTRY', , 15, 15, 0, 0, False), " \ - "('JOB_TITLE', , 25, 25, 0, 0, False), " \ - "('MIN_SALARY', , 20, 8, 10, -2, False), " \ - "('MAX_SALARY', , 20, 8, 10, -2, False), " \ - "('JOB_REQUIREMENT', , 0, 8, 0, 1, True), " \ - "('LANGUAGE_REQ', , -1, 8, 0, 0, True))") + "(('JOB_CODE', , 5, 5, 0, 0, False), " \ + "('JOB_GRADE', , 6, 2, 0, 0, False), " \ + "('JOB_COUNTRY', , 15, 15, 0, 0, False), " \ + "('JOB_TITLE', , 25, 25, 0, 0, False), " \ + "('MIN_SALARY', , 20, 8, 10, -2, False), " \ + "('MAX_SALARY', , 20, 8, 10, -2, False), " \ + "('JOB_REQUIREMENT', , 0, 8, 0, 1, True), " \ + "('LANGUAGE_REQ', , -1, 8, 0, 0, True))") cur.execute('select * from proj_dept_budget') - if ibase.PYTHON_MAJOR_VER==3: + if ibase.PYTHON_MAJOR_VER == 3: self.assertEqual(repr(cur.description), - "(('FISCAL_YEAR', , 11, 4, 0, 0, False), " \ - "('PROJ_ID', , 5, 5, 0, 0, False), " \ - "('DEPT_NO', , 3, 3, 0, 0, False), " \ - "('QUART_HEAD_CNT', , -1, 8, 0, 0, True), " \ - "('PROJECTED_BUDGET', , 20, 8, 12, -2, True))") + "(('FISCAL_YEAR', , 11, 4, 0, 0, False), " \ + "('PROJ_ID', , 5, 5, 0, 0, False), " \ + "('DEPT_NO', , 3, 3, 0, 0, False), " \ + "('QUART_HEAD_CNT', , -1, 8, 0, 0, True), " \ + 
"('PROJECTED_BUDGET', , 20, 8, 12, -2, True))") else: self.assertEqual(repr(cur.description), - "(('FISCAL_YEAR', , 11, 4, 0, 0, False), " \ - "('PROJ_ID', , 5, 5, 0, 0, False), " \ - "('DEPT_NO', , 3, 3, 0, 0, False), " \ - "('QUART_HEAD_CNT', , -1, 8, 0, 0, True), " \ - "('PROJECTED_BUDGET', , 20, 8, 12, -2, True))") + "(('FISCAL_YEAR', , 11, 4, 0, 0, False), " \ + "('PROJ_ID', , 5, 5, 0, 0, False), " \ + "('DEPT_NO', , 3, 3, 0, 0, False), " \ + "('QUART_HEAD_CNT', , -1, 8, 0, 0, True), " \ + "('PROJECTED_BUDGET', , 20, 8, 12, -2, True))") # Check for precision cache cur2 = self.con.cursor() cur2.execute('select * from proj_dept_budget') - if ibase.PYTHON_MAJOR_VER==3: + if ibase.PYTHON_MAJOR_VER == 3: self.assertEqual(repr(cur2.description), - "(('FISCAL_YEAR', , 11, 4, 0, 0, False), " \ - "('PROJ_ID', , 5, 5, 0, 0, False), " \ - "('DEPT_NO', , 3, 3, 0, 0, False), " \ - "('QUART_HEAD_CNT', , -1, 8, 0, 0, True), " \ - "('PROJECTED_BUDGET', , 20, 8, 12, -2, True))") + "(('FISCAL_YEAR', , 11, 4, 0, 0, False), " \ + "('PROJ_ID', , 5, 5, 0, 0, False), " \ + "('DEPT_NO', , 3, 3, 0, 0, False), " \ + "('QUART_HEAD_CNT', , -1, 8, 0, 0, True), " \ + "('PROJECTED_BUDGET', , 20, 8, 12, -2, True))") else: self.assertEqual(repr(cur2.description), - "(('FISCAL_YEAR', , 11, 4, 0, 0, False), " \ - "('PROJ_ID', , 5, 5, 0, 0, False), " \ - "('DEPT_NO', , 3, 3, 0, 0, False), " \ - "('QUART_HEAD_CNT', , -1, 8, 0, 0, True), " \ - "('PROJECTED_BUDGET', , 20, 8, 12, -2, True))") + "(('FISCAL_YEAR', , 11, 4, 0, 0, False), " \ + "('PROJ_ID', , 5, 5, 0, 0, False), " \ + "('DEPT_NO', , 3, 3, 0, 0, False), " \ + "('QUART_HEAD_CNT', , -1, 8, 0, 0, True), " \ + "('PROJECTED_BUDGET', , 20, 8, 12, -2, True))") def test_exec_after_close(self): cur = self.con.cursor() cur.execute('select * from country') row = cur.fetchone() - self.assertTupleEqual(row,('USA', 'Dollar')) + self.assertTupleEqual(row, ('USA', 'Dollar')) cur.close() cur.execute('select * from country') row = cur.fetchone() - self.assertTupleEqual(row,('USA', 'Dollar')) + self.assertTupleEqual(row, ('USA', 'Dollar')) def test_fetchone(self): cur = self.con.cursor() cur.execute('select * from country') row = cur.fetchone() - self.assertTupleEqual(row,('USA', 'Dollar')) + self.assertTupleEqual(row, ('USA', 'Dollar')) def test_fetchall(self): cur = self.con.cursor() cur.execute('select * from country') rows = cur.fetchall() if self.con.ods < fdb.ODS_FB_30: self.assertListEqual(rows, - [('USA', 'Dollar'), ('England', 'Pound'), ('Canada', 'CdnDlr'), - ('Switzerland', 'SFranc'), ('Japan', 'Yen'), ('Italy', 'Lira'), - ('France', 'FFranc'), ('Germany', 'D-Mark'), ('Australia', 'ADollar'), - ('Hong Kong', 'HKDollar'), ('Netherlands', 'Guilder'), - ('Belgium', 'BFranc'), ('Austria', 'Schilling'), ('Fiji', 'FDollar')]) + [('USA', 'Dollar'), ('England', 'Pound'), ('Canada', 'CdnDlr'), + ('Switzerland', 'SFranc'), ('Japan', 'Yen'), ('Italy', 'Lira'), + ('France', 'FFranc'), ('Germany', 'D-Mark'), ('Australia', 'ADollar'), + ('Hong Kong', 'HKDollar'), ('Netherlands', 'Guilder'), + ('Belgium', 'BFranc'), ('Austria', 'Schilling'), ('Fiji', 'FDollar')]) else: self.assertListEqual(rows, - [('USA', 'Dollar'), ('England', 'Pound'), ('Canada', 'CdnDlr'), - ('Switzerland', 'SFranc'), ('Japan', 'Yen'), ('Italy', 'Euro'), - ('France', 'Euro'), ('Germany', 'Euro'), ('Australia', 'ADollar'), - ('Hong Kong', 'HKDollar'), ('Netherlands', 'Euro'), - ('Belgium', 'Euro'), ('Austria', 'Euro'), ('Fiji', 'FDollar'), - ('Russia', 'Ruble'), ('Romania', 'RLeu')]) + [('USA', 'Dollar'), 
('England', 'Pound'), ('Canada', 'CdnDlr'), + ('Switzerland', 'SFranc'), ('Japan', 'Yen'), ('Italy', 'Euro'), + ('France', 'Euro'), ('Germany', 'Euro'), ('Australia', 'ADollar'), + ('Hong Kong', 'HKDollar'), ('Netherlands', 'Euro'), + ('Belgium', 'Euro'), ('Austria', 'Euro'), ('Fiji', 'FDollar'), + ('Russia', 'Ruble'), ('Romania', 'RLeu')]) def test_fetchmany(self): cur = self.con.cursor() cur.execute('select * from country') rows = cur.fetchmany(10) if self.con.ods < fdb.ODS_FB_30: self.assertListEqual(rows, - [('USA', 'Dollar'), ('England', 'Pound'), ('Canada', 'CdnDlr'), - ('Switzerland', 'SFranc'), ('Japan', 'Yen'), ('Italy', 'Lira'), - ('France', 'FFranc'), ('Germany', 'D-Mark'), ('Australia', 'ADollar'), - ('Hong Kong', 'HKDollar')]) + [('USA', 'Dollar'), ('England', 'Pound'), ('Canada', 'CdnDlr'), + ('Switzerland', 'SFranc'), ('Japan', 'Yen'), ('Italy', 'Lira'), + ('France', 'FFranc'), ('Germany', 'D-Mark'), ('Australia', 'ADollar'), + ('Hong Kong', 'HKDollar')]) rows = cur.fetchmany(10) self.assertListEqual(rows, - [('Netherlands', 'Guilder'), ('Belgium', 'BFranc'), - ('Austria', 'Schilling'), ('Fiji', 'FDollar')]) + [('Netherlands', 'Guilder'), ('Belgium', 'BFranc'), + ('Austria', 'Schilling'), ('Fiji', 'FDollar')]) rows = cur.fetchmany(10) - self.assertEqual(len(rows),0) + self.assertEqual(len(rows), 0) else: self.assertListEqual(rows, - [('USA', 'Dollar'), ('England', 'Pound'), ('Canada', 'CdnDlr'), - ('Switzerland', 'SFranc'), ('Japan', 'Yen'), ('Italy', 'Euro'), - ('France', 'Euro'), ('Germany', 'Euro'), ('Australia', 'ADollar'), - ('Hong Kong', 'HKDollar')]) + [('USA', 'Dollar'), ('England', 'Pound'), ('Canada', 'CdnDlr'), + ('Switzerland', 'SFranc'), ('Japan', 'Yen'), ('Italy', 'Euro'), + ('France', 'Euro'), ('Germany', 'Euro'), ('Australia', 'ADollar'), + ('Hong Kong', 'HKDollar')]) rows = cur.fetchmany(10) self.assertListEqual(rows, - [('Netherlands', 'Euro'), ('Belgium', 'Euro'), ('Austria', 'Euro'), - ('Fiji', 'FDollar'), ('Russia', 'Ruble'), ('Romania', 'RLeu')]) + [('Netherlands', 'Euro'), ('Belgium', 'Euro'), ('Austria', 'Euro'), + ('Fiji', 'FDollar'), ('Russia', 'Ruble'), ('Romania', 'RLeu')]) rows = cur.fetchmany(10) - self.assertEqual(len(rows),0) + self.assertEqual(len(rows), 0) def test_fetchonemap(self): cur = self.con.cursor() cur.execute('select * from country') row = cur.fetchonemap() - self.assertListEqual(row.items(),[('COUNTRY', 'USA'), ('CURRENCY', 'Dollar')]) + self.assertListEqual(row.items(), [('COUNTRY', 'USA'), ('CURRENCY', 'Dollar')]) def test_fetchallmap(self): cur = self.con.cursor() cur.execute('select * from country') rows = cur.fetchallmap() if self.con.ods < fdb.ODS_FB_30: self.assertListEqual([row.items() for row in rows], - [[('COUNTRY', 'USA'), ('CURRENCY', 'Dollar')], - [('COUNTRY', 'England'), ('CURRENCY', 'Pound')], - [('COUNTRY', 'Canada'), ('CURRENCY', 'CdnDlr')], - [('COUNTRY', 'Switzerland'), ('CURRENCY', 'SFranc')], - [('COUNTRY', 'Japan'), ('CURRENCY', 'Yen')], - [('COUNTRY', 'Italy'), ('CURRENCY', 'Lira')], - [('COUNTRY', 'France'), ('CURRENCY', 'FFranc')], - [('COUNTRY', 'Germany'), ('CURRENCY', 'D-Mark')], - [('COUNTRY', 'Australia'), ('CURRENCY', 'ADollar')], - [('COUNTRY', 'Hong Kong'), ('CURRENCY', 'HKDollar')], - [('COUNTRY', 'Netherlands'), ('CURRENCY', 'Guilder')], - [('COUNTRY', 'Belgium'), ('CURRENCY', 'BFranc')], - [('COUNTRY', 'Austria'), ('CURRENCY', 'Schilling')], - [('COUNTRY', 'Fiji'), ('CURRENCY', 'FDollar')]]) + [[('COUNTRY', 'USA'), ('CURRENCY', 'Dollar')], + [('COUNTRY', 'England'), ('CURRENCY', 'Pound')], + 
[('COUNTRY', 'Canada'), ('CURRENCY', 'CdnDlr')], + [('COUNTRY', 'Switzerland'), ('CURRENCY', 'SFranc')], + [('COUNTRY', 'Japan'), ('CURRENCY', 'Yen')], + [('COUNTRY', 'Italy'), ('CURRENCY', 'Lira')], + [('COUNTRY', 'France'), ('CURRENCY', 'FFranc')], + [('COUNTRY', 'Germany'), ('CURRENCY', 'D-Mark')], + [('COUNTRY', 'Australia'), ('CURRENCY', 'ADollar')], + [('COUNTRY', 'Hong Kong'), ('CURRENCY', 'HKDollar')], + [('COUNTRY', 'Netherlands'), ('CURRENCY', 'Guilder')], + [('COUNTRY', 'Belgium'), ('CURRENCY', 'BFranc')], + [('COUNTRY', 'Austria'), ('CURRENCY', 'Schilling')], + [('COUNTRY', 'Fiji'), ('CURRENCY', 'FDollar')]]) else: self.assertListEqual([row.items() for row in rows], - [[('COUNTRY', 'USA'), ('CURRENCY', 'Dollar')], - [('COUNTRY', 'England'), ('CURRENCY', 'Pound')], - [('COUNTRY', 'Canada'), ('CURRENCY', 'CdnDlr')], - [('COUNTRY', 'Switzerland'), ('CURRENCY', 'SFranc')], - [('COUNTRY', 'Japan'), ('CURRENCY', 'Yen')], - [('COUNTRY', 'Italy'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'France'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'Germany'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'Australia'), ('CURRENCY', 'ADollar')], - [('COUNTRY', 'Hong Kong'), ('CURRENCY', 'HKDollar')], - [('COUNTRY', 'Netherlands'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'Belgium'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'Austria'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'Fiji'), ('CURRENCY', 'FDollar')], - [('COUNTRY', 'Russia'), ('CURRENCY', 'Ruble')], - [('COUNTRY', 'Romania'), ('CURRENCY', 'RLeu')]]) + [[('COUNTRY', 'USA'), ('CURRENCY', 'Dollar')], + [('COUNTRY', 'England'), ('CURRENCY', 'Pound')], + [('COUNTRY', 'Canada'), ('CURRENCY', 'CdnDlr')], + [('COUNTRY', 'Switzerland'), ('CURRENCY', 'SFranc')], + [('COUNTRY', 'Japan'), ('CURRENCY', 'Yen')], + [('COUNTRY', 'Italy'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'France'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'Germany'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'Australia'), ('CURRENCY', 'ADollar')], + [('COUNTRY', 'Hong Kong'), ('CURRENCY', 'HKDollar')], + [('COUNTRY', 'Netherlands'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'Belgium'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'Austria'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'Fiji'), ('CURRENCY', 'FDollar')], + [('COUNTRY', 'Russia'), ('CURRENCY', 'Ruble')], + [('COUNTRY', 'Romania'), ('CURRENCY', 'RLeu')]]) def test_fetchmanymap(self): cur = self.con.cursor() cur.execute('select * from country') rows = cur.fetchmanymap(10) if self.con.ods < fdb.ODS_FB_30: self.assertListEqual([row.items() for row in rows], - [[('COUNTRY', 'USA'), ('CURRENCY', 'Dollar')], - [('COUNTRY', 'England'), ('CURRENCY', 'Pound')], - [('COUNTRY', 'Canada'), ('CURRENCY', 'CdnDlr')], - [('COUNTRY', 'Switzerland'), ('CURRENCY', 'SFranc')], - [('COUNTRY', 'Japan'), ('CURRENCY', 'Yen')], - [('COUNTRY', 'Italy'), ('CURRENCY', 'Lira')], - [('COUNTRY', 'France'), ('CURRENCY', 'FFranc')], - [('COUNTRY', 'Germany'), ('CURRENCY', 'D-Mark')], - [('COUNTRY', 'Australia'), ('CURRENCY', 'ADollar')], - [('COUNTRY', 'Hong Kong'), ('CURRENCY', 'HKDollar')]]) + [[('COUNTRY', 'USA'), ('CURRENCY', 'Dollar')], + [('COUNTRY', 'England'), ('CURRENCY', 'Pound')], + [('COUNTRY', 'Canada'), ('CURRENCY', 'CdnDlr')], + [('COUNTRY', 'Switzerland'), ('CURRENCY', 'SFranc')], + [('COUNTRY', 'Japan'), ('CURRENCY', 'Yen')], + [('COUNTRY', 'Italy'), ('CURRENCY', 'Lira')], + [('COUNTRY', 'France'), ('CURRENCY', 'FFranc')], + [('COUNTRY', 'Germany'), ('CURRENCY', 'D-Mark')], + [('COUNTRY', 'Australia'), ('CURRENCY', 'ADollar')], + [('COUNTRY', 'Hong Kong'), ('CURRENCY', 
'HKDollar')]]) rows = cur.fetchmanymap(10) self.assertListEqual([row.items() for row in rows], - [[('COUNTRY', 'Netherlands'), ('CURRENCY', 'Guilder')], - [('COUNTRY', 'Belgium'), ('CURRENCY', 'BFranc')], - [('COUNTRY', 'Austria'), ('CURRENCY', 'Schilling')], - [('COUNTRY', 'Fiji'), ('CURRENCY', 'FDollar')]]) + [[('COUNTRY', 'Netherlands'), ('CURRENCY', 'Guilder')], + [('COUNTRY', 'Belgium'), ('CURRENCY', 'BFranc')], + [('COUNTRY', 'Austria'), ('CURRENCY', 'Schilling')], + [('COUNTRY', 'Fiji'), ('CURRENCY', 'FDollar')]]) rows = cur.fetchmany(10) - self.assertEqual(len(rows),0) + self.assertEqual(len(rows), 0) else: self.assertListEqual([row.items() for row in rows], - [[('COUNTRY', 'USA'), ('CURRENCY', 'Dollar')], - [('COUNTRY', 'England'), ('CURRENCY', 'Pound')], - [('COUNTRY', 'Canada'), ('CURRENCY', 'CdnDlr')], - [('COUNTRY', 'Switzerland'), ('CURRENCY', 'SFranc')], - [('COUNTRY', 'Japan'), ('CURRENCY', 'Yen')], - [('COUNTRY', 'Italy'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'France'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'Germany'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'Australia'), ('CURRENCY', 'ADollar')], - [('COUNTRY', 'Hong Kong'), ('CURRENCY', 'HKDollar')]]) + [[('COUNTRY', 'USA'), ('CURRENCY', 'Dollar')], + [('COUNTRY', 'England'), ('CURRENCY', 'Pound')], + [('COUNTRY', 'Canada'), ('CURRENCY', 'CdnDlr')], + [('COUNTRY', 'Switzerland'), ('CURRENCY', 'SFranc')], + [('COUNTRY', 'Japan'), ('CURRENCY', 'Yen')], + [('COUNTRY', 'Italy'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'France'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'Germany'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'Australia'), ('CURRENCY', 'ADollar')], + [('COUNTRY', 'Hong Kong'), ('CURRENCY', 'HKDollar')]]) rows = cur.fetchmanymap(10) self.assertListEqual([row.items() for row in rows], - [[('COUNTRY', 'Netherlands'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'Belgium'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'Austria'), ('CURRENCY', 'Euro')], - [('COUNTRY', 'Fiji'), ('CURRENCY', 'FDollar')], - [('COUNTRY', 'Russia'), ('CURRENCY', 'Ruble')], - [('COUNTRY', 'Romania'), ('CURRENCY', 'RLeu')]]) + [[('COUNTRY', 'Netherlands'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'Belgium'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'Austria'), ('CURRENCY', 'Euro')], + [('COUNTRY', 'Fiji'), ('CURRENCY', 'FDollar')], + [('COUNTRY', 'Russia'), ('CURRENCY', 'Ruble')], + [('COUNTRY', 'Romania'), ('CURRENCY', 'RLeu')]]) rows = cur.fetchmany(10) - self.assertEqual(len(rows),0) + self.assertEqual(len(rows), 0) def test_rowcount(self): cur = self.con.cursor() - self.assertEqual(cur.rowcount,-1) + self.assertEqual(cur.rowcount, -1) cur.execute('select * from project') - self.assertEqual(cur.rowcount,0) + self.assertEqual(cur.rowcount, 0) cur.fetchone() rcount = 1 if FBTEST_HOST == '' and self.con.engine_version >= 3.0 else 6 - self.assertEqual(cur.rowcount,rcount) + self.assertEqual(cur.rowcount, rcount) def test_name(self): def assign_name(): cur.name = 'testx' cur = self.con.cursor() self.assertIsNone(cur.name) - self.assertRaises(fdb.ProgrammingError,assign_name) + self.assertRaises(fdb.ProgrammingError, assign_name) cur.execute('select * from country') cur.name = 'test' - self.assertEqual(cur.name,'test') - self.assertRaises(fdb.ProgrammingError,assign_name) + self.assertEqual(cur.name, 'test') + self.assertRaises(fdb.ProgrammingError, assign_name) def test_use_after_close(self): cmd = 'select * from country' cur = self.con.cursor() @@ -998,16 +1048,14 @@ cur.close() cur.execute(cmd) row = cur.fetchone() - self.assertTupleEqual(row,('USA', 'Dollar')) + 
self.assertTupleEqual(row, ('USA', 'Dollar')) class TestPreparedStatement(FDBTestBase): def setUp(self): - super(TestPreparedStatement,self).setUp() - self.cwd = os.getcwd() - self.dbpath = os.path.join(self.cwd,'test') - self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB) - self.con = fdb.connect(host=FBTEST_HOST,database=self.dbfile, - user=FBTEST_USER,password=FBTEST_PASSWORD) + super(TestPreparedStatement, self).setUp() + self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB) + self.con = fdb.connect(host=FBTEST_HOST, database=self.dbfile, + user=FBTEST_USER, password=FBTEST_PASSWORD) #self.con.execute_immediate("recreate table t (c1 integer)") #self.con.commit() def tearDown(self): @@ -1017,22 +1065,22 @@ def test_basic(self): cur = self.con.cursor() ps = cur.prep('select * from country') - self.assertEqual(ps._in_sqlda.sqln,10) - self.assertEqual(ps._in_sqlda.sqld,0) - self.assertEqual(ps._out_sqlda.sqln,10) - self.assertEqual(ps._out_sqlda.sqld,2) - self.assertEqual(ps.statement_type,1) - self.assertEqual(ps.sql,'select * from country') + self.assertEqual(ps._in_sqlda.sqln, 10) + self.assertEqual(ps._in_sqlda.sqld, 0) + self.assertEqual(ps._out_sqlda.sqln, 10) + self.assertEqual(ps._out_sqlda.sqld, 2) + self.assertEqual(ps.statement_type, 1) + self.assertEqual(ps.sql, 'select * from country') def test_get_plan(self): cur = self.con.cursor() ps = cur.prep('select * from job') - self.assertEqual(ps.plan,"PLAN (JOB NATURAL)") + self.assertEqual(ps.plan, "PLAN (JOB NATURAL)") def test_execution(self): cur = self.con.cursor() ps = cur.prep('select * from country') cur.execute(ps) row = cur.fetchone() - self.assertTupleEqual(row,('USA', 'Dollar')) + self.assertTupleEqual(row, ('USA', 'Dollar')) def test_wrong_cursor(self): cur = self.con.cursor() cur2 = self.con.cursor() @@ -1040,16 +1088,15 @@ with self.assertRaises(ValueError) as cm: cur2.execute(ps) self.assertTupleEqual(cm.exception.args, - ('PreparedStatement was created by different Cursor.',)) + ('PreparedStatement was created by different Cursor.',)) + class TestArrays(FDBTestBase): def setUp(self): - super(TestArrays,self).setUp() - self.cwd = os.getcwd() - self.dbpath = os.path.join(self.cwd,'test') - self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB) - self.con = fdb.connect(host=FBTEST_HOST,database=self.dbfile, - user=FBTEST_USER,password=FBTEST_PASSWORD) + super(TestArrays, self).setUp() + self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB) + self.con = fdb.connect(host=FBTEST_HOST, database=self.dbfile, + user=FBTEST_USER, password=FBTEST_PASSWORD) tbl = """recreate table AR (c1 integer, c2 integer[1:4,0:3,1:2], c3 varchar(15)[0:5,1:2], @@ -1082,12 +1129,13 @@ self.c13 = [decimal.Decimal('10.2'), decimal.Decimal('100000.3')] self.c14 = [decimal.Decimal('10.22222'), decimal.Decimal('100000.333')] self.c15 = [decimal.Decimal('1000000000000.22222'), decimal.Decimal('1000000000000.333')] + self.c16 = [True, False, True] #self.con.execute_immediate(tbl) #self.con.commit() #cur = self.con.cursor() #cur.execute("insert into ar (c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12) values (1,?,?,?,?,?,?,?,?,?,?,?)", #[self.c2,self.c3,self.c4,self.c5,self.c6,self.c7,self.c8,self.c9, - #self.c10,self.c11,self.c12]) + #self.c10,self.c11,self.c12]) #cur.execute("insert into ar (c1,c2) values (2,?)",[self.c2]) #cur.execute("insert into ar (c1,c3) values (3,?)",[self.c3]) #cur.execute("insert into ar (c1,c4) values (4,?)",[self.c4]) @@ -1113,185 +1161,191 @@ "where job_code='Eng' and job_grade=3 and job_country='Japan'") row = 
cur.fetchone() self.assertTupleEqual(row, - (['Japanese\n', 'Mandarin\n', 'English\n', '\n', '\n'],)) + (['Japanese\n', 'Mandarin\n', 'English\n', '\n', '\n'],)) cur.execute('select QUART_HEAD_CNT from proj_dept_budget') row = cur.fetchall() self.assertListEqual(row, - [([1, 1, 1, 0],), ([3, 2, 1, 0],), ([0, 0, 0, 1],), ([2, 1, 0, 0],), - ([1, 1, 0, 0],), ([1, 1, 0, 0],), ([1, 1, 1, 1],), ([2, 3, 2, 1],), - ([1, 1, 2, 2],), ([1, 1, 1, 2],), ([1, 1, 1, 2],), ([4, 5, 6, 6],), - ([2, 2, 0, 3],), ([1, 1, 2, 2],), ([7, 7, 4, 4],), ([2, 3, 3, 3],), - ([4, 5, 6, 6],), ([1, 1, 1, 1],), ([4, 5, 5, 3],), ([4, 3, 2, 2],), - ([2, 2, 2, 1],), ([1, 1, 2, 3],), ([3, 3, 1, 1],), ([1, 1, 0, 0],)]) + [([1, 1, 1, 0],), ([3, 2, 1, 0],), ([0, 0, 0, 1],), ([2, 1, 0, 0],), + ([1, 1, 0, 0],), ([1, 1, 0, 0],), ([1, 1, 1, 1],), ([2, 3, 2, 1],), + ([1, 1, 2, 2],), ([1, 1, 1, 2],), ([1, 1, 1, 2],), ([4, 5, 6, 6],), + ([2, 2, 0, 3],), ([1, 1, 2, 2],), ([7, 7, 4, 4],), ([2, 3, 3, 3],), + ([4, 5, 6, 6],), ([1, 1, 1, 1],), ([4, 5, 5, 3],), ([4, 3, 2, 2],), + ([2, 2, 2, 1],), ([1, 1, 2, 3],), ([3, 3, 1, 1],), ([1, 1, 0, 0],)]) def test_read_full(self): cur = self.con.cursor() cur.execute("select c1,c2 from ar where c1=2") row = cur.fetchone() - self.assertListEqual(row[1],self.c2) + self.assertListEqual(row[1], self.c2) cur.execute("select c1,c3 from ar where c1=3") row = cur.fetchone() - self.assertListEqual(row[1],self.c3) + self.assertListEqual(row[1], self.c3) cur.execute("select c1,c4 from ar where c1=4") row = cur.fetchone() - self.assertListEqual(row[1],self.c4) + self.assertListEqual(row[1], self.c4) cur.execute("select c1,c5 from ar where c1=5") row = cur.fetchone() - self.assertListEqual(row[1],self.c5) + self.assertListEqual(row[1], self.c5) cur.execute("select c1,c6 from ar where c1=6") row = cur.fetchone() - self.assertListEqual(row[1],self.c6) + self.assertListEqual(row[1], self.c6) cur.execute("select c1,c7 from ar where c1=7") row = cur.fetchone() - self.assertListEqual(row[1],self.c7) + self.assertListEqual(row[1], self.c7) cur.execute("select c1,c8 from ar where c1=8") row = cur.fetchone() - self.assertListEqual(row[1],self.c8) + self.assertListEqual(row[1], self.c8) cur.execute("select c1,c9 from ar where c1=9") row = cur.fetchone() - self.assertListEqual(row[1],self.c9) + self.assertListEqual(row[1], self.c9) cur.execute("select c1,c10 from ar where c1=10") row = cur.fetchone() - self.assertListEqual(row[1],self.c10) + self.assertListEqual(row[1], self.c10) cur.execute("select c1,c11 from ar where c1=11") row = cur.fetchone() - self.assertListEqual(row[1],self.c11) + self.assertListEqual(row[1], self.c11) cur.execute("select c1,c12 from ar where c1=12") row = cur.fetchone() - self.assertListEqual(row[1],self.c12) + self.assertListEqual(row[1], self.c12) cur.execute("select c1,c13 from ar where c1=13") row = cur.fetchone() - self.assertListEqual(row[1],self.c13) + self.assertListEqual(row[1], self.c13) cur.execute("select c1,c14 from ar where c1=14") row = cur.fetchone() - self.assertListEqual(row[1],self.c14) + self.assertListEqual(row[1], self.c14) cur.execute("select c1,c15 from ar where c1=15") row = cur.fetchone() - self.assertListEqual(row[1],self.c15) + self.assertListEqual(row[1], self.c15) def test_write_full(self): cur = self.con.cursor() # INTEGER - cur.execute("insert into ar (c1,c2) values (102,?)",[self.c2]) + cur.execute("insert into ar (c1,c2) values (102,?)", [self.c2]) self.con.commit() cur.execute("select c1,c2 from ar where c1=102") row = cur.fetchone() - 
-        self.assertListEqual(row[1],self.c2)
+        self.assertListEqual(row[1], self.c2)
         # VARCHAR
-        cur.execute("insert into ar (c1,c3) values (103,?)",[self.c3])
+        cur.execute("insert into ar (c1,c3) values (103,?)", [self.c3])
         self.con.commit()
         cur.execute("select c1,c3 from ar where c1=103")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c3)
+        self.assertListEqual(row[1], self.c3)
-        cur.execute("insert into ar (c1,c3) values (103,?)",[tuple(self.c3)])
+        cur.execute("insert into ar (c1,c3) values (103,?)", [tuple(self.c3)])
         self.con.commit()
         cur.execute("select c1,c3 from ar where c1=103")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c3)
+        self.assertListEqual(row[1], self.c3)
         # CHAR
-        cur.execute("insert into ar (c1,c4) values (104,?)",[self.c4])
+        cur.execute("insert into ar (c1,c4) values (104,?)", [self.c4])
         self.con.commit()
         cur.execute("select c1,c4 from ar where c1=104")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c4)
+        self.assertListEqual(row[1], self.c4)
         # TIMESTAMP
-        cur.execute("insert into ar (c1,c5) values (105,?)",[self.c5])
+        cur.execute("insert into ar (c1,c5) values (105,?)", [self.c5])
         self.con.commit()
         cur.execute("select c1,c5 from ar where c1=105")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c5)
+        self.assertListEqual(row[1], self.c5)
         # TIME OK
-        cur.execute("insert into ar (c1,c6) values (106,?)",[self.c6])
+        cur.execute("insert into ar (c1,c6) values (106,?)", [self.c6])
         self.con.commit()
         cur.execute("select c1,c6 from ar where c1=106")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c6)
+        self.assertListEqual(row[1], self.c6)
         # DECIMAL(10,2)
-        cur.execute("insert into ar (c1,c7) values (107,?)",[self.c7])
+        cur.execute("insert into ar (c1,c7) values (107,?)", [self.c7])
         self.con.commit()
         cur.execute("select c1,c7 from ar where c1=107")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c7)
+        self.assertListEqual(row[1], self.c7)
         # NUMERIC(10,2)
-        cur.execute("insert into ar (c1,c8) values (108,?)",[self.c8])
+        cur.execute("insert into ar (c1,c8) values (108,?)", [self.c8])
         self.con.commit()
         cur.execute("select c1,c8 from ar where c1=108")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c8)
+        self.assertListEqual(row[1], self.c8)
         # SMALLINT
-        cur.execute("insert into ar (c1,c9) values (109,?)",[self.c9])
+        cur.execute("insert into ar (c1,c9) values (109,?)", [self.c9])
         self.con.commit()
         cur.execute("select c1,c9 from ar where c1=109")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c9)
+        self.assertListEqual(row[1], self.c9)
         # BIGINT
-        cur.execute("insert into ar (c1,c10) values (110,?)",[self.c10])
+        cur.execute("insert into ar (c1,c10) values (110,?)", [self.c10])
         self.con.commit()
         cur.execute("select c1,c10 from ar where c1=110")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c10)
+        self.assertListEqual(row[1], self.c10)
         # FLOAT
-        cur.execute("insert into ar (c1,c11) values (111,?)",[self.c11])
+        cur.execute("insert into ar (c1,c11) values (111,?)", [self.c11])
         self.con.commit()
         cur.execute("select c1,c11 from ar where c1=111")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c11)
+        self.assertListEqual(row[1], self.c11)
         # DOUBLE PRECISION
-        cur.execute("insert into ar (c1,c12) values (112,?)",[self.c12])
+        cur.execute("insert into ar (c1,c12) values (112,?)", [self.c12])
         self.con.commit()
         cur.execute("select c1,c12 from ar where c1=112")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c12)
+        self.assertListEqual(row[1], self.c12)
         # DECIMAL(10,1) OK
-        cur.execute("insert into ar (c1,c13) values (113,?)",[self.c13])
+        cur.execute("insert into ar (c1,c13) values (113,?)", [self.c13])
         self.con.commit()
         cur.execute("select c1,c13 from ar where c1=113")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c13)
+        self.assertListEqual(row[1], self.c13)
         # DECIMAL(10,5)
-        cur.execute("insert into ar (c1,c14) values (114,?)",[self.c14])
+        cur.execute("insert into ar (c1,c14) values (114,?)", [self.c14])
         self.con.commit()
         cur.execute("select c1,c14 from ar where c1=114")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c14)
+        self.assertListEqual(row[1], self.c14)
         # DECIMAL(18,5)
-        cur.execute("insert into ar (c1,c15) values (115,?)",[self.c15])
+        cur.execute("insert into ar (c1,c15) values (115,?)", [self.c15])
         self.con.commit()
         cur.execute("select c1,c15 from ar where c1=115")
         row = cur.fetchone()
-        self.assertListEqual(row[1],self.c15)
+        self.assertListEqual(row[1], self.c15)
+
+        if self.version == FB30:
+            # BOOLEAN
+            cur.execute("insert into ar (c1,c16) values (116,?)", [self.c16])
+            self.con.commit()
+            cur.execute("select c1,c16 from ar where c1=116")
+            row = cur.fetchone()
+            self.assertListEqual(row[1], self.c16)

     def test_write_wrong(self):
         cur = self.con.cursor()
         with self.assertRaises(ValueError) as cm:
-            cur.execute("insert into ar (c1,c2) values (102,?)",[self.c3])
-        self.assertTupleEqual(cm.exception.args,('Incorrect ARRAY field value.',))
+            cur.execute("insert into ar (c1,c2) values (102,?)", [self.c3])
+        self.assertTupleEqual(cm.exception.args, ('Incorrect ARRAY field value.',))
         with self.assertRaises(ValueError) as cm:
-            cur.execute("insert into ar (c1,c2) values (102,?)",[self.c2[:-1]])
-        self.assertTupleEqual(cm.exception.args,('Incorrect ARRAY field value.',))
+            cur.execute("insert into ar (c1,c2) values (102,?)", [self.c2[:-1]])
+        self.assertTupleEqual(cm.exception.args, ('Incorrect ARRAY field value.',))

 class TestInsertData(FDBTestBase):
     def setUp(self):
-        super(TestInsertData,self).setUp()
-        self.cwd = os.getcwd()
-        self.dbpath = os.path.join(self.cwd,'test')
-        self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB)
-        self.con = fdb.connect(host=FBTEST_HOST,database=self.dbfile,
-                               user=FBTEST_USER,password=FBTEST_PASSWORD)
-        self.con2 = fdb.connect(host=FBTEST_HOST,database=self.dbfile,
-                                user=FBTEST_USER,password=FBTEST_PASSWORD,
+        super(TestInsertData, self).setUp()
+        self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB)
+        self.con = fdb.connect(host=FBTEST_HOST, database=self.dbfile,
+                               user=FBTEST_USER, password=FBTEST_PASSWORD)
+        self.con2 = fdb.connect(host=FBTEST_HOST, database=self.dbfile,
+                                user=FBTEST_USER, password=FBTEST_PASSWORD,
                                 charset='utf-8')
         #self.con.execute_immediate("recreate table t (c1 integer)")
         #self.con.commit()
@@ -1305,108 +1359,109 @@
         self.con.close()

     def test_insert_integers(self):
         cur = self.con.cursor()
-        cur.execute('insert into T2 (C1,C2,C3) values (?,?,?)',[1,1,1])
+        cur.execute('insert into T2 (C1,C2,C3) values (?,?,?)', [1, 1, 1])
         self.con.commit()
         cur.execute('select C1,C2,C3 from T2 where C1 = 1')
         rows = cur.fetchall()
-        self.assertListEqual(rows,[(1, 1, 1)])
+        self.assertListEqual(rows, [(1, 1, 1)])
         cur.execute('insert into T2 (C1,C2,C3) values (?,?,?)',
-                    [2,1,9223372036854775807])
+                    [2, 1, 9223372036854775807])
         cur.execute('insert into T2 (C1,C2,C3) values (?,?,?)',
-                    [2,1,-9223372036854775807-1])
+                    [2, 1, -9223372036854775807-1])
         self.con.commit()
         cur.execute('select C1,C2,C3 from T2 where C1 = 2')
         rows = cur.fetchall()
         self.assertListEqual(rows,
-            [(2, 1, 9223372036854775807), (2, 1, -9223372036854775808)])
+                             [(2, 1, 9223372036854775807), (2, 1, -9223372036854775808)])

     def test_insert_char_varchar(self):
         cur = self.con.cursor()
-        cur.execute('insert into T2 (C1,C4,C5) values (?,?,?)',[2,'AA','AA'])
+        cur.execute('insert into T2 (C1,C4,C5) values (?,?,?)', [2, 'AA', 'AA'])
         self.con.commit()
         cur.execute('select C1,C4,C5 from T2 where C1 = 2')
         rows = cur.fetchall()
-        self.assertListEqual(rows,[(2, 'AA   ', 'AA')])
+        self.assertListEqual(rows, [(2, 'AA   ', 'AA')])
         # Too long values
         with self.assertRaises(ValueError) as cm:
-            cur.execute('insert into T2 (C1,C4) values (?,?)',[3,'123456'])
+            cur.execute('insert into T2 (C1,C4) values (?,?)', [3, '123456'])
         self.con.commit()
         self.assertTupleEqual(cm.exception.args,
-            ('Value of parameter (1) is too long, expected 5, found 6',))
+                              ('Value of parameter (1) is too long, expected 5, found 6',))
         with self.assertRaises(ValueError) as cm:
-            cur.execute('insert into T2 (C1,C5) values (?,?)',[3,'12345678901'])
+            cur.execute('insert into T2 (C1,C5) values (?,?)', [3, '12345678901'])
         self.con.commit()
         self.assertTupleEqual(cm.exception.args,
-            ('Value of parameter (1) is too long, expected 10, found 11',))
+                              ('Value of parameter (1) is too long, expected 10, found 11',))

     def test_insert_datetime(self):
         cur = self.con.cursor()
-        now = datetime.datetime(2011,11,13,15,00,1,200)
-        cur.execute('insert into T2 (C1,C6,C7,C8) values (?,?,?,?)',[3,now.date(),now.time(),now])
+        now = datetime.datetime(2011, 11, 13, 15, 00, 1, 200)
+        cur.execute('insert into T2 (C1,C6,C7,C8) values (?,?,?,?)', [3, now.date(), now.time(), now])
         self.con.commit()
         cur.execute('select C1,C6,C7,C8 from T2 where C1 = 3')
         rows = cur.fetchall()
         self.assertListEqual(rows,
-            [(3, datetime.date(2011, 11, 13), datetime.time(15, 0, 1, 200),
-              datetime.datetime(2011, 11, 13, 15, 0, 1, 200))])
+                             [(3, datetime.date(2011, 11, 13), datetime.time(15, 0, 1, 200),
+                               datetime.datetime(2011, 11, 13, 15, 0, 1, 200))])

-        cur.execute('insert into T2 (C1,C6,C7,C8) values (?,?,?,?)',[4,'2011-11-13','15:0:1:200','2011-11-13 15:0:1:200'])
+        cur.execute('insert into T2 (C1,C6,C7,C8) values (?,?,?,?)', [4, '2011-11-13', '15:0:1:200', '2011-11-13 15:0:1:200'])
         self.con.commit()
         cur.execute('select C1,C6,C7,C8 from T2 where C1 = 4')
         rows = cur.fetchall()
         self.assertListEqual(rows,
-            [(4, datetime.date(2011, 11, 13), datetime.time(15, 0, 1, 200000),
-              datetime.datetime(2011, 11, 13, 15, 0, 1, 200000))])
+                             [(4, datetime.date(2011, 11, 13), datetime.time(15, 0, 1, 200000),
+                               datetime.datetime(2011, 11, 13, 15, 0, 1, 200000))])

     def test_insert_blob(self):
         cur = self.con.cursor()
         cur2 = self.con2.cursor()
-        cur.execute('insert into T2 (C1,C9) values (?,?)',[4,'This is a BLOB!'])
+        cur.execute('insert into T2 (C1,C9) values (?,?)', [4, 'This is a BLOB!'])
         cur.transaction.commit()
         cur.execute('select C1,C9 from T2 where C1 = 4')
         rows = cur.fetchall()
-        self.assertListEqual(rows,[(4, 'This is a BLOB!')])
+        self.assertListEqual(rows, [(4, 'This is a BLOB!')])
         # Non-textual BLOB
-        blob_data = fdb.bs([0,1,2,3,4,5,6,7,8,9,10])
-        cur.execute('insert into T2 (C1,C16) values (?,?)',[8,blob_data])
+        blob_data = fdb.bs([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+        cur.execute('insert into T2 (C1,C16) values (?,?)', [8, blob_data])
         cur.transaction.commit()
         cur.execute('select C1,C16 from T2 where C1 = 8')
         rows = cur.fetchall()
-        self.assertListEqual(rows,[(8, blob_data)])
+        self.assertListEqual(rows, [(8, blob_data)])
         # BLOB bigger than max. segment size
         big_blob = '123456789' * 10000
-        cur.execute('insert into T2 (C1,C9) values (?,?)',[5,big_blob])
+        cur.execute('insert into T2 (C1,C9) values (?,?)', [5, big_blob])
         cur.transaction.commit()
         cur.execute('select C1,C9 from T2 where C1 = 5')
         row = cur.fetchone()
-        self.assertEqual(row[1],big_blob)
+        self.assertIsInstance(row[1], fdb.BlobReader)
+        self.assertEqual(row[1].read(), big_blob)
         # Unicode in BLOB
         blob_text = 'This is a BLOB!'
-        if not isinstance(blob_text,ibase.myunicode):
+        if not isinstance(blob_text, ibase.myunicode):
             blob_text = blob_text.decode('utf-8')
-        cur2.execute('insert into T2 (C1,C9) values (?,?)',[6,blob_text])
+        cur2.execute('insert into T2 (C1,C9) values (?,?)', [6, blob_text])
         cur2.transaction.commit()
         cur2.execute('select C1,C9 from T2 where C1 = 6')
         rows = cur2.fetchall()
-        self.assertListEqual(rows,[(6, blob_text)])
+        self.assertListEqual(rows, [(6, blob_text)])
         # Unicode non-textual BLOB
         with self.assertRaises(TypeError) as cm:
-            cur2.execute('insert into T2 (C1,C16) values (?,?)',[7,blob_text])
+            cur2.execute('insert into T2 (C1,C16) values (?,?)', [7, blob_text])
         self.assertTupleEqual(cm.exception.args,
-            ("Unicode strings are not acceptable input for a non-textual BLOB column.",))
+                              ("Unicode strings are not acceptable input for a non-textual BLOB column.",))

     def test_insert_float_double(self):
         cur = self.con.cursor()
-        cur.execute('insert into T2 (C1,C12,C13) values (?,?,?)',[5,1.0,1.0])
+        cur.execute('insert into T2 (C1,C12,C13) values (?,?,?)', [5, 1.0, 1.0])
         self.con.commit()
         cur.execute('select C1,C12,C13 from T2 where C1 = 5')
         rows = cur.fetchall()
-        self.assertListEqual(rows,[(5, 1.0, 1.0)])
-        cur.execute('insert into T2 (C1,C12,C13) values (?,?,?)',[6,1,1])
+        self.assertListEqual(rows, [(5, 1.0, 1.0)])
+        cur.execute('insert into T2 (C1,C12,C13) values (?,?,?)', [6, 1, 1])
         self.con.commit()
         cur.execute('select C1,C12,C13 from T2 where C1 = 6')
         rows = cur.fetchall()
-        self.assertListEqual(rows,[(6, 1.0, 1.0)])
+        self.assertListEqual(rows, [(6, 1.0, 1.0)])

     def test_insert_numeric_decimal(self):
         cur = self.con.cursor()
-        cur.execute('insert into T2 (C1,C10,C11) values (?,?,?)',[6,1.1,1.1])
-        cur.execute('insert into T2 (C1,C10,C11) values (?,?,?)',[6,decimal.Decimal('100.11'),decimal.Decimal('100.11')])
+        cur.execute('insert into T2 (C1,C10,C11) values (?,?,?)', [6, 1.1, 1.1])
+        cur.execute('insert into T2 (C1,C10,C11) values (?,?,?)', [6, decimal.Decimal('100.11'), decimal.Decimal('100.11')])
         self.con.commit()
         cur.execute('select C1,C10,C11 from T2 where C1 = 6')
         rows = cur.fetchall()
@@ -1415,72 +1470,76 @@
                              (6, Decimal('100.11'), Decimal('100.11'))])

     def test_insert_returning(self):
         cur = self.con.cursor()
-        cur.execute('insert into T2 (C1,C10,C11) values (?,?,?) returning C1',[6,1.1,1.1])
+        cur.execute('insert into T2 (C1,C10,C11) values (?,?,?) returning C1', [7, 1.1, 1.1])
         result = cur.fetchall()
-        self.assertListEqual(result,[(6,)])
+        self.assertListEqual(result, [(7,)])
+
+    def test_insert_boolean(self):
+        if self.version == FB30:
+            cur = self.con.cursor()
+            cur.execute('insert into T2 (C1,C17) values (?,?) returning C1', [8, True])
+            cur.execute('insert into T2 (C1,C17) values (?,?) returning C1', [8, False])
+            cur.execute('select C1,C17 from T2 where C1 = 8')
+            result = cur.fetchall()
+            self.assertListEqual(result, [(8, True), (8, False)])

 class TestStoredProc(FDBTestBase):
     def setUp(self):
-        super(TestStoredProc,self).setUp()
-        self.cwd = os.getcwd()
-        self.dbpath = os.path.join(self.cwd,'test')
-        self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB)
-        self.con = fdb.connect(host=FBTEST_HOST,database=self.dbfile,
-                               user=FBTEST_USER,password=FBTEST_PASSWORD)
+        super(TestStoredProc, self).setUp()
+        self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB)
+        self.con = fdb.connect(host=FBTEST_HOST, database=self.dbfile,
+                               user=FBTEST_USER, password=FBTEST_PASSWORD)

     def tearDown(self):
         self.con.close()

     def test_callproc(self):
         cur = self.con.cursor()
-        result = cur.callproc('sub_tot_budget',['100'])
-        self.assertListEqual(result,['100'])
+        result = cur.callproc('sub_tot_budget', ['100'])
+        self.assertListEqual(result, ['100'])
         row = cur.fetchone()
-        self.assertTupleEqual(row,(Decimal('3800000'), Decimal('760000'),
-                                   Decimal('500000'), Decimal('1500000')))
-        result = cur.callproc('sub_tot_budget',[100])
-        self.assertListEqual(result,[100])
+        self.assertTupleEqual(row, (Decimal('3800000'), Decimal('760000'),
+                                    Decimal('500000'), Decimal('1500000')))
+        result = cur.callproc('sub_tot_budget', [100])
+        self.assertListEqual(result, [100])
         row = cur.fetchone()
-        self.assertTupleEqual(row,(Decimal('3800000'), Decimal('760000'),
-                                   Decimal('500000'), Decimal('1500000')))
+        self.assertTupleEqual(row, (Decimal('3800000'), Decimal('760000'),
+                                    Decimal('500000'), Decimal('1500000')))

 class TestServices(FDBTestBase):
     def setUp(self):
-        super(TestServices,self).setUp()
-        self.cwd = os.getcwd()
-        self.dbpath = os.path.join(self.cwd,'test')
-        self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB)
+        super(TestServices, self).setUp()
+        self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB)

     def test_attach(self):
-        svc = fdb.services.connect(host=FBTEST_HOST,password=FBTEST_PASSWORD)
+        svc = fdb.services.connect(host=FBTEST_HOST, password=FBTEST_PASSWORD)
         svc.close()

     def test_query(self):
-        svc = fdb.services.connect(host=FBTEST_HOST,password=FBTEST_PASSWORD)
-        self.assertEqual(svc.get_service_manager_version(),2)
-        self.assertIn('Firebird',svc.get_server_version())
-        self.assertIn('Firebird',svc.get_architecture())
+        svc = fdb.services.connect(host=FBTEST_HOST, password=FBTEST_PASSWORD)
+        self.assertEqual(svc.get_service_manager_version(), 2)
+        self.assertIn('Firebird', svc.get_server_version())
+        self.assertIn('Firebird', svc.get_architecture())
         x = svc.get_home_directory()
         #self.assertEqual(x,'/opt/firebird/')
         if svc.engine_version < 3.0:
-            self.assertIn('security2.fdb',svc.get_security_database_path())
+            self.assertIn('security2.fdb', svc.get_security_database_path())
         else:
-            self.assertIn('security3.fdb',svc.get_security_database_path())
+            self.assertIn('security3.fdb', svc.get_security_database_path())
         x = svc.get_lock_file_directory()
         #self.assertEqual(x,'/tmp/firebird/')
         x = svc.get_server_capabilities()
-        self.assertIsInstance(x,type(tuple()))
+        self.assertIsInstance(x, type(tuple()))
         x = svc.get_message_file_directory()
         #self.assertEqual(x,'/opt/firebird/')
-        con = fdb.connect(host=FBTEST_HOST,database=self.dbfile,
-                          user=FBTEST_USER,password=FBTEST_PASSWORD)
-        con2 = fdb.connect(host=FBTEST_HOST,database='employee',
-                           user=FBTEST_USER,password=FBTEST_PASSWORD)
-        self.assertGreaterEqual(len(svc.get_attached_database_names()),2)
+        con = fdb.connect(host=FBTEST_HOST, database=self.dbfile,
+                          user=FBTEST_USER, password=FBTEST_PASSWORD)
+        con2 = fdb.connect(host=FBTEST_HOST, database='employee',
+                           user=FBTEST_USER, password=FBTEST_PASSWORD)
+        self.assertGreaterEqual(len(svc.get_attached_database_names()), 2, "Should work for Superserver, may fail with value 0 for Classic")
         self.assertIn(self.dbfile.upper(),
                       [s.upper() for s in svc.get_attached_database_names()])
         #self.assertIn('/opt/firebird/examples/empbuild/employee.fdb',x)
-        self.assertGreaterEqual(svc.get_connection_count(),2)
+        self.assertGreaterEqual(svc.get_connection_count(), 2)
         svc.close()

     def test_running(self):
-        svc = fdb.services.connect(host=FBTEST_HOST,password=FBTEST_PASSWORD)
+        svc = fdb.services.connect(host=FBTEST_HOST, password=FBTEST_PASSWORD)
         self.assertFalse(svc.isrunning())
         svc.get_log()
         #self.assertTrue(svc.isrunning())
@@ -1490,10 +1549,10 @@
         self.assertFalse(svc.isrunning())
         svc.close()

     def test_wait(self):
-        svc = fdb.services.connect(host=FBTEST_HOST,password=FBTEST_PASSWORD)
+        svc = fdb.services.connect(host=FBTEST_HOST, password=FBTEST_PASSWORD)
         self.assertFalse(svc.isrunning())
         svc.get_log()
-        #self.assertTrue(svc.isrunning())
+        self.assertTrue(svc.isrunning())
         self.assertTrue(svc.fetching)
         svc.wait()
         self.assertFalse(svc.isrunning())
@@ -1502,19 +1561,17 @@

 class TestServices2(FDBTestBase):
     def setUp(self):
-        super(TestServices2,self).setUp()
-        self.cwd = os.getcwd()
-        self.dbpath = os.path.join(self.cwd,'test')
-        self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB)
-        self.fbk = os.path.join(self.dbpath,'test_employee.fbk')
-        self.fbk2 = os.path.join(self.dbpath,'test_employee.fbk2')
-        self.rfdb = os.path.join(self.dbpath,'test_employee.fdb')
-        self.svc = fdb.services.connect(host=FBTEST_HOST,password=FBTEST_PASSWORD)
-        self.con = fdb.connect(host=FBTEST_HOST,database=self.dbfile,
-                               user=FBTEST_USER,password=FBTEST_PASSWORD)
+        super(TestServices2, self).setUp()
+        self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB)
+        self.fbk = os.path.join(self.dbpath, 'test_employee.fbk')
+        self.fbk2 = os.path.join(self.dbpath, 'test_employee.fbk2')
+        self.rfdb = os.path.join(self.dbpath, 'test_employee.fdb')
+        self.svc = fdb.services.connect(host=FBTEST_HOST, password=FBTEST_PASSWORD)
+        self.con = fdb.connect(host=FBTEST_HOST, database=self.dbfile,
+                               user=FBTEST_USER, password=FBTEST_PASSWORD)
         if not os.path.exists(self.rfdb):
-            c = fdb.create_database(host=FBTEST_HOST,database=self.rfdb,
-                                    user=FBTEST_USER,password=FBTEST_PASSWORD)
+            c = fdb.create_database(host=FBTEST_HOST, database=self.rfdb,
+                                    user=FBTEST_USER, password=FBTEST_PASSWORD)
             c.close()

     def tearDown(self):
         self.svc.close()
@@ -1536,20 +1593,21 @@
         log = self.svc.readlines()
         self.assertFalse(self.svc.fetching)
         self.assertTrue(log)
-        self.assertIsInstance(log,type(list()))
+        self.assertIsInstance(log, type(list()))
         # iterate over result
         self.svc.get_log()
         for line in self.svc:
             self.assertIsNotNone(line)
-            self.assertIsInstance(line,fdb.StringType)
+            self.assertIsInstance(line, fdb.StringType)
         self.assertFalse(self.svc.fetching)
         # callback
         output = []
         self.svc.get_log(callback=fetchline)
-        self.assertGreater(len(output),0)
+        self.assertGreater(len(output), 0)
+        self.assertEqual(output, log)

     def test_getLimboTransactionIDs(self):
         ids = self.svc.get_limbo_transaction_ids('employee')
-        self.assertIsInstance(ids,type(list()))
+        self.assertIsInstance(ids, type(list()))

     def test_getStatistics(self):
         def fetchline(line):
             output.append(line)
@@ -1560,33 +1618,33 @@
         stats = self.svc.readlines()
         self.assertFalse(self.svc.fetching)
         self.assertFalse(self.svc.isrunning())
-        self.assertIsInstance(stats,type(list()))
+        self.assertIsInstance(stats, type(list()))
         # iterate over result
         self.svc.get_statistics('employee',
                                 show_system_tables_and_indexes=True,
                                 show_record_versions=True)
         for line in self.svc:
-            self.assertIsInstance(line,fdb.StringType)
+            self.assertIsInstance(line, fdb.StringType)
         self.assertFalse(self.svc.fetching)
         # callback
         output = []
         self.svc.get_statistics('employee', callback=fetchline)
-        self.assertGreater(len(output),0)
+        self.assertGreater(len(output), 0)
         # fetch only selected tables
         stats = self.svc.get_statistics('employee',
                                         show_user_data_pages=True,
                                         tables='COUNTRY')
         stats = '\n'.join(self.svc.readlines())
-        self.assertIn('COUNTRY',stats)
-        self.assertNotIn('JOB',stats)
+        self.assertIn('COUNTRY', stats)
+        self.assertNotIn('JOB', stats)
         #
         stats = self.svc.get_statistics('employee',
                                         show_user_data_pages=True,
-                                        tables=('COUNTRY','PROJECT'))
+                                        tables=('COUNTRY', 'PROJECT'))
         stats = '\n'.join(self.svc.readlines())
-        self.assertIn('COUNTRY',stats)
-        self.assertIn('PROJECT',stats)
-        self.assertNotIn('JOB',stats)
+        self.assertIn('COUNTRY', stats)
+        self.assertIn('PROJECT', stats)
+        self.assertNotIn('JOB', stats)

     def test_backup(self):
         def fetchline(line):
             output.append(line)
@@ -1598,7 +1656,7 @@
         self.assertFalse(self.svc.fetching)
         self.assertFalse(self.svc.isrunning())
         self.assertTrue(os.path.exists(self.fbk))
-        self.assertIsInstance(report,type(list()))
+        self.assertIsInstance(report, type(list()))
         # iterate over result
         self.svc.backup('employee', self.fbk,
                         ignore_checksums=1,
@@ -1606,24 +1664,24 @@
                         metadata_only=1,
                         collect_garbage=0,
                         transportable=0,
-                        convert_external_tables_to_internal=1,
+                        convert_external_tables=1,
                         compressed=0,
                         no_db_triggers=0)
         for line in self.svc:
             self.assertIsNotNone(line)
-            self.assertIsInstance(line,fdb.StringType)
+            self.assertIsInstance(line, fdb.StringType)
         self.assertFalse(self.svc.fetching)
         # callback
         output = []
         self.svc.backup('employee', self.fbk, callback=fetchline)
-        self.assertGreater(len(output),0)
+        self.assertGreater(len(output), 0)
         # Firebird 3.0 stats
         if self.con.ods >= fdb.ODS_FB_30:
             output = []
             self.svc.backup('employee', self.fbk, callback=fetchline,
-                            stats=[fdb.services.STATS_TOTAL_TIME,fdb.services.STATS_TIME_DELTA,
-                                   fdb.services.STATS_PAGE_READS,fdb.services.STATS_PAGE_WRITES])
-            self.assertGreater(len(output),0)
+                            stats=[fdb.services.STATS_TOTAL_TIME, fdb.services.STATS_TIME_DELTA,
+                                   fdb.services.STATS_PAGE_READS, fdb.services.STATS_PAGE_WRITES])
+            self.assertGreater(len(output), 0)
             self.assertIn('gbak: time delta reads writes ', output)

     def test_restore(self):
         def fetchline(line):
@@ -1637,26 +1695,42 @@
         # fetch materialized
         report = self.svc.readlines()
         self.assertFalse(self.svc.fetching)
+        time.sleep(1) # Sometimes service is still running after there is no more data to fetch (slower shutdown)
         self.assertFalse(self.svc.isrunning())
-        self.assertIsInstance(report,type(list()))
+        self.assertIsInstance(report, type(list()))
         # iterate over result
         self.svc.restore(self.fbk, self.rfdb, replace=1)
         for line in self.svc:
             self.assertIsNotNone(line)
-            self.assertIsInstance(line,fdb.StringType)
+            self.assertIsInstance(line, fdb.StringType)
         self.assertFalse(self.svc.fetching)
         # callback
         output = []
         self.svc.restore(self.fbk, self.rfdb, replace=1, callback=fetchline)
-        self.assertGreater(len(output),0)
+        self.assertGreater(len(output), 0)
         # Firebird 3.0 stats
         if self.con.ods >= fdb.ODS_FB_30:
             output = []
             self.svc.restore(self.fbk, self.rfdb, replace=1, callback=fetchline,
-                             stats=[fdb.services.STATS_TOTAL_TIME,fdb.services.STATS_TIME_DELTA,
-                                    fdb.services.STATS_PAGE_READS,fdb.services.STATS_PAGE_WRITES])
-            self.assertGreater(len(output),0)
+                             stats=[fdb.services.STATS_TOTAL_TIME, fdb.services.STATS_TIME_DELTA,
+                                    fdb.services.STATS_PAGE_READS, fdb.services.STATS_PAGE_WRITES])
+            self.assertGreater(len(output), 0)
             self.assertIn('gbak: time delta reads writes ', output)
+
+    def test_local_backup(self):
+        self.svc.backup('employee', self.fbk)
+        self.svc.wait()
+        with open(self.fbk, mode='rb') as f:
+            bkp = f.read()
+        backup_stream = BytesIO()
+        self.svc.local_backup('employee', backup_stream)
+        backup_stream.seek(0)
+        self.assertEqual(bkp, backup_stream.read())
+
+    def test_local_restore(self):
+        backup_stream = BytesIO()
+        self.svc.local_backup('employee', backup_stream)
+        backup_stream.seek(0)
+        self.svc.local_restore(backup_stream, self.rfdb, replace=1)
+        self.assertTrue(os.path.exists(self.rfdb))

     def test_nbackup(self):
         if self.con.engine_version < 2.5:
             return
@@ -1684,110 +1758,112 @@
            max_sql_length 2048
         """ % self.dbfile
-        svc2 = fdb.services.connect(host=FBTEST_HOST,password=FBTEST_PASSWORD)
-        svcx = fdb.services.connect(host=FBTEST_HOST,password=FBTEST_PASSWORD)
+        svc2 = fdb.services.connect(host=FBTEST_HOST, password=FBTEST_PASSWORD)
+        svcx = fdb.services.connect(host=FBTEST_HOST, password=FBTEST_PASSWORD)
         # Start trace sessions
-        trace1_id = self.svc.trace_start(trace_config,'test_trace_1')
+        trace1_id = self.svc.trace_start(trace_config, 'test_trace_1')
         trace2_id = svc2.trace_start(trace_config)
         # check sessions
         sessions = svcx.trace_list()
-        self.assertIn(trace1_id,sessions)
-        self.assertListEqual(list(sessions[trace1_id].keys()),
-                             ['date', 'flags', 'name', 'user'])
-        self.assertIn(trace2_id,sessions)
-        self.assertListEqual(list(sessions[trace2_id].keys()),
-                             ['date', 'flags', 'user'])
+        self.assertIn(trace1_id, sessions)
+        seq = list(sessions[trace1_id].keys())
+        seq.sort()
+        self.assertListEqual(seq,['date', 'flags', 'name', 'user'])
+        self.assertIn(trace2_id, sessions)
+        seq = list(sessions[trace2_id].keys())
+        seq.sort()
+        self.assertListEqual(seq,['date', 'flags', 'user'])
         if self.con.engine_version < 3.0:
-            self.assertListEqual(sessions[trace1_id]['flags'],['active', ' admin', ' trace'])
-            self.assertListEqual(sessions[trace2_id]['flags'],['active', ' admin', ' trace'])
+            self.assertListEqual(sessions[trace1_id]['flags'], ['active', ' admin', ' trace'])
+            self.assertListEqual(sessions[trace2_id]['flags'], ['active', ' admin', ' trace'])
         else:
-            self.assertListEqual(sessions[trace1_id]['flags'],['active', ' trace'])
-            self.assertListEqual(sessions[trace2_id]['flags'],['active', ' trace'])
+            self.assertListEqual(sessions[trace1_id]['flags'], ['active', ' trace'])
+            self.assertListEqual(sessions[trace2_id]['flags'], ['active', ' trace'])
         # Pause session
         svcx.trace_suspend(trace2_id)
-        self.assertIn('suspend',svcx.trace_list()[trace2_id]['flags'])
+        self.assertIn('suspend', svcx.trace_list()[trace2_id]['flags'])
         # Resume session
         svcx.trace_resume(trace2_id)
-        self.assertIn('active',svcx.trace_list()[trace2_id]['flags'])
+        self.assertIn('active', svcx.trace_list()[trace2_id]['flags'])
         # Stop session
         svcx.trace_stop(trace2_id)
-        self.assertNotIn(trace2_id,svcx.trace_list())
+        self.assertNotIn(trace2_id, svcx.trace_list())
         # Finalize
         svcx.trace_stop(trace1_id)
         svc2.close()
         svcx.close()

     def test_setDefaultPageBuffers(self):
-        self.svc.set_default_page_buffers(self.rfdb,100)
+        self.svc.set_default_page_buffers(self.rfdb, 100)

     def test_setSweepInterval(self):
-        self.svc.set_sweep_interval(self.rfdb,10000)
+        self.svc.set_sweep_interval(self.rfdb, 10000)

     def test_shutdown_bringOnline(self):
         if self.con.engine_version < 2.5:
             # Basic shutdown/online
             self.svc.shutdown(self.rfdb, fdb.services.SHUT_LEGACY,
-                              fdb.services.SHUT_FORCE,0)
-            self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-            self.assertIn('multi-user maintenance',''.join(self.svc.readlines()))
+                              fdb.services.SHUT_FORCE, 0)
+            self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+            self.assertIn('multi-user maintenance', ''.join(self.svc.readlines()))
             # Return to normal state
-            self.svc.bring_online(self.rfdb,fdb.services.SHUT_LEGACY)
-            self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-            self.assertNotIn('multi-user maintenance',''.join(self.svc.readlines()))
+            self.svc.bring_online(self.rfdb, fdb.services.SHUT_LEGACY)
+            self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+            self.assertNotIn('multi-user maintenance', ''.join(self.svc.readlines()))
         else:
             # Shutdown database to single-user maintenance mode
             self.svc.shutdown(self.rfdb, fdb.services.SHUT_SINGLE,
-                              fdb.services.SHUT_FORCE,0)
-            self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-            self.assertIn('single-user maintenance',''.join(self.svc.readlines()))
+                              fdb.services.SHUT_FORCE, 0)
+            self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+            self.assertIn('single-user maintenance', ''.join(self.svc.readlines()))
             # Enable multi-user maintenance
-            self.svc.bring_online(self.rfdb,fdb.services.SHUT_MULTI)
-            self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-            self.assertIn('multi-user maintenance',''.join(self.svc.readlines()))
+            self.svc.bring_online(self.rfdb, fdb.services.SHUT_MULTI)
+            self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+            self.assertIn('multi-user maintenance', ''.join(self.svc.readlines()))
             # Go to full shutdown mode, disabling new attachments during 5 seconds
             self.svc.shutdown(self.rfdb, fdb.services.SHUT_FULL,
-                              fdb.services.SHUT_DENY_NEW_ATTACHMENTS,5)
-            self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-            self.assertIn('full shutdown',''.join(self.svc.readlines()))
+                              fdb.services.SHUT_DENY_NEW_ATTACHMENTS, 5)
+            self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+            self.assertIn('full shutdown', ''.join(self.svc.readlines()))
             # Enable single-user maintenance
-            self.svc.bring_online(self.rfdb,fdb.services.SHUT_SINGLE)
-            self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-            self.assertIn('single-user maintenance',''.join(self.svc.readlines()))
+            self.svc.bring_online(self.rfdb, fdb.services.SHUT_SINGLE)
+            self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+            self.assertIn('single-user maintenance', ''.join(self.svc.readlines()))
             # Return to normal state
             self.svc.bring_online(self.rfdb)

     def test_setShouldReservePageSpace(self):
-        self.svc.set_reserve_page_space(self.rfdb,False)
-        self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-        self.assertIn('no reserve',''.join(self.svc.readlines()))
-        self.svc.set_reserve_page_space(self.rfdb,True)
-        self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-        self.assertNotIn('no reserve',''.join(self.svc.readlines()))
+        self.svc.set_reserve_page_space(self.rfdb, False)
+        self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+        self.assertIn('no reserve', ''.join(self.svc.readlines()))
+        self.svc.set_reserve_page_space(self.rfdb, True)
+        self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+        self.assertNotIn('no reserve', ''.join(self.svc.readlines()))

     def test_setWriteMode(self):
         # Forced writes
-        self.svc.set_write_mode(self.rfdb,fdb.services.WRITE_FORCED)
-        self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-        self.assertIn('force write',''.join(self.svc.readlines()))
+        self.svc.set_write_mode(self.rfdb, fdb.services.WRITE_FORCED)
+        self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+        self.assertIn('force write', ''.join(self.svc.readlines()))
         # No Forced writes
-        self.svc.set_write_mode(self.rfdb,fdb.services.WRITE_BUFFERED)
-        self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-        self.assertNotIn('force write',''.join(self.svc.readlines()))
+        self.svc.set_write_mode(self.rfdb, fdb.services.WRITE_BUFFERED)
+        self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+        self.assertNotIn('force write', ''.join(self.svc.readlines()))

     def test_setAccessMode(self):
         # Read Only
-        self.svc.set_access_mode(self.rfdb,fdb.services.ACCESS_READ_ONLY)
-        self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-        self.assertIn('read only',''.join(self.svc.readlines()))
+        self.svc.set_access_mode(self.rfdb, fdb.services.ACCESS_READ_ONLY)
+        self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+        self.assertIn('read only', ''.join(self.svc.readlines()))
         # Read/Write
-        self.svc.set_access_mode(self.rfdb,fdb.services.ACCESS_READ_WRITE)
-        self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-        self.assertNotIn('read only',''.join(self.svc.readlines()))
+        self.svc.set_access_mode(self.rfdb, fdb.services.ACCESS_READ_WRITE)
+        self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+        self.assertNotIn('read only', ''.join(self.svc.readlines()))

     def test_setSQLDialect(self):
-        self.svc.set_sql_dialect(self.rfdb,1)
-        self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-        self.assertIn('Database dialect\t1',''.join(self.svc.readlines()))
-        self.svc.set_sql_dialect(self.rfdb,3)
-        self.svc.get_statistics(self.rfdb,show_only_db_header_pages=1)
-        self.assertIn('Database dialect\t3',''.join(self.svc.readlines()))
+        self.svc.set_sql_dialect(self.rfdb, 1)
+        self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+        self.assertIn('Database dialect\t1', ''.join(self.svc.readlines()))
+        self.svc.set_sql_dialect(self.rfdb, 3)
+        self.svc.get_statistics(self.rfdb, show_only_db_header_pages=1)
+        self.assertIn('Database dialect\t3', ''.join(self.svc.readlines()))

     def test_activateShadowFile(self):
         self.svc.activate_shadow(self.rfdb)

     def test_nolinger(self):
@@ -1807,31 +1883,31 @@
         report = self.svc.readlines()
         self.assertFalse(self.svc.fetching)
         self.assertFalse(self.svc.isrunning())
-        self.assertIsInstance(report,type(list()))
-        self.assertIn('Validation started','/n'.join(report))
-        self.assertIn('Validation finished','/n'.join(report))
+        self.assertIsInstance(report, type(list()))
+        self.assertIn('Validation started', '/n'.join(report))
+        self.assertIn('Validation finished', '/n'.join(report))
         # iterate over result
         self.svc.validate(self.dbfile)
         for line in self.svc:
             self.assertIsNotNone(line)
-            self.assertIsInstance(line,fdb.StringType)
+            self.assertIsInstance(line, fdb.StringType)
         self.assertFalse(self.svc.fetching)
         # callback
         output = []
         self.svc.validate(self.dbfile, callback=fetchline)
-        self.assertGreater(len(output),0)
+        self.assertGreater(len(output), 0)
         # Parameters
-        self.svc.validate(self.dbfile,include_tables='COUNTRY|SALES',
-                          include_indices='SALESTATX',lock_timeout=-1)
+        self.svc.validate(self.dbfile, include_tables='COUNTRY|SALES',
+                          include_indices='SALESTATX', lock_timeout=-1)
         report = '/n'.join(self.svc.readlines())
         self.assertIn('(COUNTRY)', report)
         self.assertIn('(SALES)', report)
         self.assertIn('(SALESTATX)', report)

     def test_getUsers(self):
         users = self.svc.get_users()
-        self.assertIsInstance(users,type(list()))
-        self.assertIsInstance(users[0],fdb.services.User)
-        self.assertEqual(users[0].name,'SYSDBA')
+        self.assertIsInstance(users, type(list()))
+        self.assertIsInstance(users[0], fdb.services.User)
+        self.assertEqual(users[0].name, 'SYSDBA')

     def test_manage_user(self):
         user = fdb.services.User('FDB_TEST')
         user.password = 'FDB_TEST'
@@ -1840,18 +1916,21 @@
         user.last_name = 'TEST'
         try:
             self.svc.remove_user(user)
-        except:
-            pass
+        except fdb.DatabaseError as e:
+            if 'SQLCODE: -85' in e.args[0]:
+                pass
+            else:
+                raise e
         self.svc.add_user(user)
         self.assertTrue(self.svc.user_exists(user))
         self.assertTrue(self.svc.user_exists('FDB_TEST'))
         users = [u for u in self.svc.get_users() if u.name == 'FDB_TEST']
         self.assertTrue(users)
-        self.assertEqual(len(users),1)
+        self.assertEqual(len(users), 1)
         #self.assertEqual(users[0].password,'FDB_TEST')
-        self.assertEqual(users[0].first_name,'FDB')
-        self.assertEqual(users[0].middle_name,'X.')
-        self.assertEqual(users[0].last_name,'TEST')
+        self.assertEqual(users[0].first_name, 'FDB')
+        self.assertEqual(users[0].middle_name, 'X.')
+        self.assertEqual(users[0].last_name, 'TEST')
         user.password = 'XFDB_TEST'
         user.first_name = 'XFDB'
         user.middle_name = 'XX.'
@@ -1859,25 +1938,23 @@
         self.svc.modify_user(user)
         users = [u for u in self.svc.get_users() if u.name == 'FDB_TEST']
         self.assertTrue(users)
-        self.assertEqual(len(users),1)
+        self.assertEqual(len(users), 1)
         #self.assertEqual(users[0].password,'XFDB_TEST')
-        self.assertEqual(users[0].first_name,'XFDB')
-        self.assertEqual(users[0].middle_name,'XX.')
-        self.assertEqual(users[0].last_name,'XTEST')
+        self.assertEqual(users[0].first_name, 'XFDB')
+        self.assertEqual(users[0].middle_name, 'XX.')
+        self.assertEqual(users[0].last_name, 'XTEST')
         self.svc.remove_user(user)
         self.assertFalse(self.svc.user_exists('FDB_TEST'))

 class TestEvents(FDBTestBase):
     def setUp(self):
-        super(TestEvents,self).setUp()
-        self.cwd = os.getcwd()
-        self.dbpath = os.path.join(self.cwd,'test')
-        self.dbfile = os.path.join(self.dbpath,'fbevents.fdb')
+        super(TestEvents, self).setUp()
+        self.dbfile = os.path.join(self.dbpath, 'fbevents.fdb')
         if os.path.exists(self.dbfile):
             os.remove(self.dbfile)
-        self.con = fdb.create_database(host=FBTEST_HOST,database=self.dbfile,
-                                       user=FBTEST_USER,password=FBTEST_PASSWORD)
+        self.con = fdb.create_database(host=FBTEST_HOST, database=self.dbfile,
+                                       user=FBTEST_USER, password=FBTEST_PASSWORD)
         c = self.con.cursor()
         c.execute("CREATE TABLE T (PK Integer, C1 Integer)")
         c.execute("""CREATE TRIGGER EVENTS_AU FOR T ACTIVE
@@ -1911,12 +1988,12 @@
                 c.execute(cmd)
             self.con.commit()

-        timed_event = threading.Timer(3.0,send_events,args=[["insert into T (PK,C1) values (1,1)",]])
+        timed_event = threading.Timer(3.0, send_events, args=[["insert into T (PK,C1) values (1,1)",]])
         with self.con.event_conduit(['insert_1']) as events:
             timed_event.start()
             e = events.wait()
         timed_event.join()
-        self.assertDictEqual(e,{'insert_1': 1})
+        self.assertDictEqual(e, {'insert_1': 1})

     def test_multiple_events(self):
         def send_events(command_list):
             c = self.con.cursor()
@@ -1928,12 +2005,12 @@
                 "insert into T (PK,C1) values (1,3)",
                 "insert into T (PK,C1) values (1,1)",
                 "insert into T (PK,C1) values (1,2)",]
-        timed_event = threading.Timer(3.0,send_events,args=[cmds])
-        with self.con.event_conduit(['insert_1','insert_3']) as events:
+        timed_event = threading.Timer(3.0, send_events, args=[cmds])
+        with self.con.event_conduit(['insert_1', 'insert_3']) as events:
             timed_event.start()
             e = events.wait()
         timed_event.join()
-        self.assertDictEqual(e,{'insert_3': 1, 'insert_1': 2})
+        self.assertDictEqual(e, {'insert_3': 1, 'insert_1': 2})

     def test_20_events(self):
         def send_events(command_list):
             c = self.con.cursor()
@@ -1946,18 +2023,18 @@
                 "insert into T (PK,C1) values (1,1)",
                 "insert into T (PK,C1) values (1,2)",]
         self.e = {}
-        timed_event = threading.Timer(1.0,send_events,args=[cmds])
-        with self.con.event_conduit(['insert_1','A','B','C','D',
-                                     'E','F','G','H','I','J','K','L','M',
-                                     'N','O','P','Q','R','insert_3']) as events:
+        timed_event = threading.Timer(1.0, send_events, args=[cmds])
+        with self.con.event_conduit(['insert_1', 'A', 'B', 'C', 'D',
+                                     'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
+                                     'N', 'O', 'P', 'Q', 'R', 'insert_3']) as events:
             timed_event.start()
             time.sleep(3)
             e = events.wait()
         timed_event.join()
         self.assertDictEqual(e,
-            {'A': 0, 'C': 0, 'B': 0, 'E': 0, 'D': 0, 'G': 0, 'insert_1': 2,
-             'I': 0, 'H': 0, 'K': 0, 'J': 0, 'M': 0, 'L': 0, 'O': 0, 'N': 0,
-             'Q': 0, 'P': 0, 'R': 0, 'insert_3': 1, 'F': 0})
+                             {'A': 0, 'C': 0, 'B': 0, 'E': 0, 'D': 0, 'G': 0, 'insert_1': 2,
+                              'I': 0, 'H': 0, 'K': 0, 'J': 0, 'M': 0, 'L': 0, 'O': 0, 'N': 0,
+                              'Q': 0, 'P': 0, 'R': 0, 'insert_3': 1, 'F': 0})

     def test_flush_events(self):
         def send_events(command_list):
             c = self.con.cursor()
@@ -1965,7 +2042,7 @@
                 c.execute(cmd)
             self.con.commit()

-        timed_event = threading.Timer(3.0,send_events,args=[["insert into T (PK,C1) values (1,1)",]])
+        timed_event = threading.Timer(3.0, send_events, args=[["insert into T (PK,C1) values (1,1)",]])
         with self.con.event_conduit(['insert_1']) as events:
             send_events(["insert into T (PK,C1) values (1,1)",
                          "insert into T (PK,C1) values (1,1)"])
@@ -1974,16 +2051,14 @@
             timed_event.start()
             e = events.wait()
         timed_event.join()
-        self.assertDictEqual(e,{'insert_1': 1})
+        self.assertDictEqual(e, {'insert_1': 1})

 class TestStreamBLOBs(FDBTestBase):
     def setUp(self):
-        super(TestStreamBLOBs,self).setUp()
-        self.cwd = os.getcwd()
-        self.dbpath = os.path.join(self.cwd,'test')
-        self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB)
-        self.con = fdb.connect(host=FBTEST_HOST,database=self.dbfile,
-                               user=FBTEST_USER,password=FBTEST_PASSWORD)
+        super(TestStreamBLOBs, self).setUp()
+        self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB)
+        self.con = fdb.connect(host=FBTEST_HOST, database=self.dbfile,
+                               user=FBTEST_USER, password=FBTEST_PASSWORD)
         #self.con.execute_immediate("recreate table t (c1 integer)")
         #self.con.commit()
         #self.con.execute_immediate("RECREATE TABLE T2 (C1 Smallint,C2 Integer,C3 Bigint,C4 Char(5),C5 Varchar(10),C6 Date,C7 Time,C8 Timestamp,C9 Blob sub_type 1,C10 Numeric(18,2),C11 Decimal(18,2),C12 Float,C13 Double precision,C14 Numeric(8,4),C15 Decimal(8,4))")
@@ -1999,7 +2074,7 @@
 Each chunk starts with a two byte length indicator followed by however many bytes of data were passed as a segment.
 Stream blobs are stored as a continuous array of data bytes with no length indicators included."""
         cur = self.con.cursor()
-        cur.execute('insert into T2 (C1,C9) values (?,?)',[4,StringIO(blob)])
+        cur.execute('insert into T2 (C1,C9) values (?,?)', [4, StringIO(blob)])
         self.con.commit()
         p = cur.prep('select C1,C9 from T2 where C1 = 4')
         p.set_stream_blob('C9')
@@ -2009,32 +2084,31 @@
         ## Necessary to avoid bad BLOB handle on BlobReader.close in tearDown
         ## because BLOB handle is no longer valid after table purge
         with closing(p):
-            self.assertEqual(blob_reader.read(20),'Firebird supports tw')
-            self.assertEqual(blob_reader.read(20),'o types of blobs, st')
-            self.assertEqual(blob_reader.read(400),
-                'ream and segmented.\nThe database stores segmented blobs in ' \
-                'chunks.\nEach chunk starts with a two byte length indicator ' \
-                'followed by however many bytes of data were passed as ' \
-                'a segment.\nStream blobs are stored as a continuous array ' \
-                'of data bytes with no length indicators included.')
-            self.assertEqual(blob_reader.read(20),'')
-            self.assertEqual(blob_reader.tell(),318)
+            self.assertEqual(blob_reader.read(20), 'Firebird supports tw')
+            self.assertEqual(blob_reader.read(20), 'o types of blobs, st')
+            self.assertEqual(blob_reader.read(400), 'ream and segmented.\nThe database stores segmented blobs in '
+                             'chunks.\nEach chunk starts with a two byte length indicator '
+                             'followed by however many bytes of data were passed as '
+                             'a segment.\nStream blobs are stored as a continuous array '
+                             'of data bytes with no length indicators included.')
+            self.assertEqual(blob_reader.read(20), '')
+            self.assertEqual(blob_reader.tell(), 318)
             blob_reader.seek(20)
-            self.assertEqual(blob_reader.tell(),20)
-            self.assertEqual(blob_reader.read(20),'o types of blobs, st')
+            self.assertEqual(blob_reader.tell(), 20)
+            self.assertEqual(blob_reader.read(20), 'o types of blobs, st')
             blob_reader.seek(0)
-            self.assertEqual(blob_reader.tell(),0)
-            self.assertListEqual(blob_reader.readlines(),StringIO(blob).readlines())
+            self.assertEqual(blob_reader.tell(), 0)
+            self.assertListEqual(blob_reader.readlines(), StringIO(blob).readlines())
            blob_reader.seek(0)
             for line in blob_reader:
-                self.assertIn(line.rstrip('\n'),blob.split('\n'))
+                self.assertIn(line.rstrip('\n'), blob.split('\n'))
             blob_reader.seek(0)
-            self.assertEqual(blob_reader.read(),blob)
-            blob_reader.seek(-9,os.SEEK_END)
-            self.assertEqual(blob_reader.read(),'included.')
-            blob_reader.seek(-20,os.SEEK_END)
-            blob_reader.seek(11,os.SEEK_CUR)
-            self.assertEqual(blob_reader.read(),'included.')
+            self.assertEqual(blob_reader.read(), blob)
+            blob_reader.seek(-9, os.SEEK_END)
+            self.assertEqual(blob_reader.read(), 'included.')
+            blob_reader.seek(-20, os.SEEK_END)
+            blob_reader.seek(11, os.SEEK_CUR)
+            self.assertEqual(blob_reader.read(), 'included.')
             blob_reader.seek(60)
             self.assertEqual(blob_reader.readline(),
                              'The database stores segmented blobs in chunks.\n')
@@ -2044,8 +2118,8 @@
 Each chunk starts with a two byte length indicator followed by however many bytes of data were passed as a segment.
 Stream blobs are stored as a continuous array of data bytes with no length indicators included."""
         cur = self.con.cursor()
-        cur.execute('insert into T2 (C1,C9) values (?,?)',[1,StringIO(blob)])
-        cur.execute('insert into T2 (C1,C9) values (?,?)',[2,StringIO(blob)])
+        cur.execute('insert into T2 (C1,C9) values (?,?)', [1, StringIO(blob)])
+        cur.execute('insert into T2 (C1,C9) values (?,?)', [2, StringIO(blob)])
         self.con.commit()
         p = cur.prep('select C1,C9 from T2')
         p.set_stream_blob('C9')
@@ -2056,46 +2130,43 @@
         with closing(p):
             for row in cur:
                 blob_reader = row[1]
-                self.assertEqual(blob_reader.read(20),'Firebird supports tw')
-                self.assertEqual(blob_reader.read(20),'o types of blobs, st')
-                self.assertEqual(blob_reader.read(400),
-                    'ream and segmented.\nThe database stores segmented blobs ' \
-                    'in chunks.\nEach chunk starts with a two byte length ' \
-                    'indicator followed by however many bytes of data were ' \
-                    'passed as a segment.\nStream blobs are stored as a ' \
-                    'continuous array of data bytes with no length indicators ' \
-                    'included.')
-                self.assertEqual(blob_reader.read(20),'')
-                self.assertEqual(blob_reader.tell(),318)
+                self.assertEqual(blob_reader.read(20), 'Firebird supports tw')
+                self.assertEqual(blob_reader.read(20), 'o types of blobs, st')
+                self.assertEqual(blob_reader.read(400), 'ream and segmented.\nThe database stores segmented blobs '
+                                 'in chunks.\nEach chunk starts with a two byte length '
+                                 'indicator followed by however many bytes of data were '
+                                 'passed as a segment.\nStream blobs are stored as a '
+                                 'continuous array of data bytes with no length indicators '
+                                 'included.')
+                self.assertEqual(blob_reader.read(20), '')
+                self.assertEqual(blob_reader.tell(), 318)
                 blob_reader.seek(20)
-                self.assertEqual(blob_reader.tell(),20)
-                self.assertEqual(blob_reader.read(20),'o types of blobs, st')
+                self.assertEqual(blob_reader.tell(), 20)
+                self.assertEqual(blob_reader.read(20), 'o types of blobs, st')
                 blob_reader.seek(0)
-                self.assertEqual(blob_reader.tell(),0)
+                self.assertEqual(blob_reader.tell(), 0)
                 self.assertListEqual(blob_reader.readlines(),
                                      StringIO(blob).readlines())
                 blob_reader.seek(0)
                 for line in blob_reader:
-                    self.assertIn(line.rstrip('\n'),blob.split('\n'))
+                    self.assertIn(line.rstrip('\n'), blob.split('\n'))
                 blob_reader.seek(0)
-                self.assertEqual(blob_reader.read(),blob)
-                blob_reader.seek(-9,os.SEEK_END)
-                self.assertEqual(blob_reader.read(),'included.')
-                blob_reader.seek(-20,os.SEEK_END)
-                blob_reader.seek(11,os.SEEK_CUR)
-                self.assertEqual(blob_reader.read(),'included.')
+                self.assertEqual(blob_reader.read(), blob)
+                blob_reader.seek(-9, os.SEEK_END)
+                self.assertEqual(blob_reader.read(), 'included.')
+                blob_reader.seek(-20, os.SEEK_END)
+                blob_reader.seek(11, os.SEEK_CUR)
+                self.assertEqual(blob_reader.read(), 'included.')
                 blob_reader.seek(60)
                 self.assertEqual(blob_reader.readline(),
-                    'The database stores segmented blobs in chunks.\n')
+                                 'The database stores segmented blobs in chunks.\n')

 class TestCharsetConversion(FDBTestBase):
     def setUp(self):
-        super(TestCharsetConversion,self).setUp()
-        self.cwd = os.getcwd()
-        self.dbpath = os.path.join(self.cwd,'test')
-        self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB)
-        self.con = fdb.connect(host=FBTEST_HOST,database=self.dbfile,
-                               user=FBTEST_USER,password=FBTEST_PASSWORD,
+        super(TestCharsetConversion, self).setUp()
+        self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB)
+        self.con = fdb.connect(host=FBTEST_HOST, database=self.dbfile,
+                               user=FBTEST_USER, password=FBTEST_PASSWORD,
                                charset='utf8')
         #self.con.execute_immediate("recreate table t (c1 integer)")
         #self.con.commit()
@@ -2107,19 +2178,19 @@
         self.con.commit()
         self.con.close()

     def test_octets(self):
-        bytestring = fdb.fbcore.bs([1,2,3,4,5])
+        bytestring = fdb.fbcore.bs([1, 2, 3, 4, 5])
         cur = self.con.cursor()
         cur.execute("insert into T4 (C1, C_OCTETS, V_OCTETS) values (?,?,?)",
-                    (1, bytestring,bytestring))
+                    (1, bytestring, bytestring))
         self.con.commit()
         cur.execute("select C1, C_OCTETS, V_OCTETS from T4 where C1 = 1")
         row = cur.fetchone()
         if ibase.PYTHON_MAJOR_VER == 3:
             self.assertTupleEqual(row,
-                (1, b'\x01\x02\x03\x04\x05', b'\x01\x02\x03\x04\x05'))
+                                  (1, b'\x01\x02\x03\x04\x05', b'\x01\x02\x03\x04\x05'))
         else:
             self.assertTupleEqual(row,
-                (1, '\x01\x02\x03\x04\x05', '\x01\x02\x03\x04\x05'))
+                                  (1, '\x01\x02\x03\x04\x05', '\x01\x02\x03\x04\x05'))

     def test_utf82win1250(self):
         s5 = 'ěščřž'
         s30 = 'ěščřžýáíéúůďťňóĚŠČŘŽÝÁÍÉÚŮĎŤŇÓ'
@@ -2127,39 +2198,39 @@
             s5 = s5.decode('utf8')
             s30 = s30.decode('utf8')

-        con1250 = fdb.connect(host=FBTEST_HOST,database=self.dbfile,user=FBTEST_USER,
-                              password=FBTEST_PASSWORD,charset='win1250')
+        con1250 = fdb.connect(host=FBTEST_HOST, database=self.dbfile, user=FBTEST_USER,
+                              password=FBTEST_PASSWORD, charset='win1250')
         c_utf8 = self.con.cursor()
         c_win1250 = con1250.cursor()

         # Insert unicode data
         c_utf8.execute("insert into T4 (C1, C_WIN1250, V_WIN1250, C_UTF8, V_UTF8)"
                        "values (?,?,?,?,?)",
-                       (1,s5,s30,s5,s30))
+                       (1, s5, s30, s5, s30))
         self.con.commit()

         # Should return the same unicode content when read from win1250 or utf8 connection
         c_win1250.execute("select C1, C_WIN1250, V_WIN1250,"
                           "C_UTF8, V_UTF8 from T4 where C1 = 1")
         row = c_win1250.fetchone()
-        self.assertTupleEqual(row,(1,s5,s30,s5,s30))
+        self.assertTupleEqual(row, (1, s5, s30, s5, s30))
         c_utf8.execute("select C1, C_WIN1250, V_WIN1250,"
                        "C_UTF8, V_UTF8 from T4 where C1 = 1")
         row = c_utf8.fetchone()
-        self.assertTupleEqual(row,(1,s5,s30,s5,s30))
+        self.assertTupleEqual(row, (1, s5, s30, s5, s30))

     def testCharVarchar(self):
         s = 'Introdução'
         if ibase.PYTHON_MAJOR_VER != 3:
             s = s.decode('utf8')
-        self.assertEqual(len(s),10)
-        data = tuple([1,s,s])
+        self.assertEqual(len(s), 10)
+        data = tuple([1, s, s])
         cur = self.con.cursor()
-        cur.execute('insert into T3 (C1,C2,C3) values (?,?,?)',data)
+        cur.execute('insert into T3 (C1,C2,C3) values (?,?,?)', data)
         self.con.commit()
         cur.execute('select C1,C2,C3 from T3 where C1 = 1')
         row = cur.fetchone()
-        self.assertEqual(row,data)
+        self.assertEqual(row, data)

     def testBlob(self):
         s = """Introdução
@@ -2169,38 +2240,36 @@
 Porém você poderá trocar entre ambos com um mínimo de luta.
""" if ibase.PYTHON_MAJOR_VER != 3: s = s.decode('utf8') - self.assertEqual(len(s),292) - data = tuple([2,s]) - b_data = tuple([3,ibase.b('bytestring')]) + self.assertEqual(len(s), 292) + data = tuple([2, s]) + b_data = tuple([3, ibase.b('bytestring')]) cur = self.con.cursor() # Text BLOB - cur.execute('insert into T3 (C1,C4) values (?,?)',data) + cur.execute('insert into T3 (C1,C4) values (?,?)', data) self.con.commit() cur.execute('select C1,C4 from T3 where C1 = 2') row = cur.fetchone() - self.assertEqual(row,data) + self.assertEqual(row, data) # Insert Unicode into non-textual BLOB with self.assertRaises(TypeError) as cm: - cur.execute('insert into T3 (C1,C5) values (?,?)',data) + cur.execute('insert into T3 (C1,C5) values (?,?)', data) self.con.commit() self.assertTupleEqual(cm.exception.args, - ('Unicode strings are not acceptable input for a non-textual BLOB column.',)) + ('Unicode strings are not acceptable input for a non-textual BLOB column.',)) # Read binary from non-textual BLOB - cur.execute('insert into T3 (C1,C5) values (?,?)',b_data) + cur.execute('insert into T3 (C1,C5) values (?,?)', b_data) self.con.commit() cur.execute('select C1,C5 from T3 where C1 = 3') row = cur.fetchone() - self.assertEqual(row,b_data) + self.assertEqual(row, b_data) class TestSchema(FDBTestBase): def setUp(self): - super(TestSchema,self).setUp() - self.cwd = os.getcwd() - self.dbpath = os.path.join(self.cwd,'test') - self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB) + super(TestSchema, self).setUp() + self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB) #self.dbfile = '/home/data/db/employee30.fdb' - self.con = fdb.connect(host=FBTEST_HOST,database=self.dbfile, - user=FBTEST_USER,password=FBTEST_PASSWORD) + self.con = fdb.connect(host=FBTEST_HOST, database=self.dbfile, + user=FBTEST_USER, password=FBTEST_PASSWORD) def tearDown(self): self.con.close() def testSchemaBindClose(self): @@ -2210,12 +2279,12 @@ # properties self.assertIsNone(s.description) self.assertIsNone(s.linger) - self.assertEqual(s.owner_name,'SYSDBA') - self.assertEqual(s.default_character_set.name,'NONE') + self.assertEqual(s.owner_name, 'SYSDBA') + self.assertEqual(s.default_character_set.name, 'NONE') if self.con.ods < fdb.ODS_FB_30: self.assertIsNone(s.security_class) else: - self.assertEqual(s.security_class,'SQL$363') + self.assertEqual(s.security_class, 'SQL$363') self.assertFalse(s.closed) # s.close() @@ -2231,348 +2300,352 @@ s = self.con.schema # enum_* disctionaries self.assertDictEqual(s.enum_param_type_from, - {0: 'DATATYPE', 1: 'DOMAIN', 2: 'TYPE OF DOMAIN', 3: 'TYPE OF COLUMN'}) + {0: 'DATATYPE', 1: 'DOMAIN', 2: 'TYPE OF DOMAIN', 3: 'TYPE OF COLUMN'}) if self.con.ods <= fdb.ODS_FB_20: self.assertDictEqual(s.enum_object_types, - {0: 'RELATION', 1: 'VIEW', 2: 'TRIGGER', 3: 'COMPUTED_FIELD', - 4: 'VALIDATION', 5: 'PROCEDURE', 6: 'EXPRESSION_INDEX', - 7: 'EXCEPTION', 8: 'USER', 9: 'FIELD', 10: 'INDEX', - 11: 'DEPENDENT_COUNT', 12: 'USER_GROUP', 13: 'ROLE', - 14: 'GENERATOR', 15: 'UDF', 16: 'BLOB_FILTER'}) + {0: 'RELATION', 1: 'VIEW', 2: 'TRIGGER', 3: 'COMPUTED_FIELD', + 4: 'VALIDATION', 5: 'PROCEDURE', 6: 'EXPRESSION_INDEX', + 7: 'EXCEPTION', 8: 'USER', 9: 'FIELD', 10: 'INDEX', + 11: 'DEPENDENT_COUNT', 12: 'USER_GROUP', 13: 'ROLE', + 14: 'GENERATOR', 15: 'UDF', 16: 'BLOB_FILTER'}) self.assertDictEqual(s.enum_object_type_codes, - {'INDEX': 10, 'EXCEPTION': 7, 'GENERATOR': 14, 'UDF': 15, - 'EXPRESSION_INDEX': 6, 'FIELD': 9, 'COMPUTED_FIELD': 3, - 'TRIGGER': 2, 'RELATION': 0, 'USER': 8, 'DEPENDENT_COUNT': 11, - 
-                'USER_GROUP': 12, 'BLOB_FILTER': 16, 'ROLE': 13,
-                'VALIDATION': 4, 'PROCEDURE': 5, 'VIEW': 1})
+                                 {'INDEX': 10, 'EXCEPTION': 7, 'GENERATOR': 14, 'UDF': 15,
+                                 'EXPRESSION_INDEX': 6, 'FIELD': 9, 'COMPUTED_FIELD': 3,
+                                 'TRIGGER': 2, 'RELATION': 0, 'USER': 8, 'DEPENDENT_COUNT': 11,
+                                 'USER_GROUP': 12, 'BLOB_FILTER': 16, 'ROLE': 13,
+                                 'VALIDATION': 4, 'PROCEDURE': 5, 'VIEW': 1})
         elif self.con.ods > fdb.ODS_FB_20 and self.con.ods < fdb.ODS_FB_30:
             self.assertDictEqual(s.enum_object_types,
-                {0: 'RELATION', 1: 'VIEW', 2: 'TRIGGER', 3: 'COMPUTED_FIELD',
-                4: 'VALIDATION', 5: 'PROCEDURE', 6: 'EXPRESSION_INDEX',
-                7: 'EXCEPTION', 8: 'USER', 9: 'FIELD', 10: 'INDEX',
-                12: 'USER_GROUP', 13: 'ROLE', 14: 'GENERATOR', 15: 'UDF',
-                16: 'BLOB_FILTER', 17: 'COLLATION'})
+                                 {0: 'RELATION', 1: 'VIEW', 2: 'TRIGGER', 3: 'COMPUTED_FIELD',
+                                 4: 'VALIDATION', 5: 'PROCEDURE', 6: 'EXPRESSION_INDEX',
+                                 7: 'EXCEPTION', 8: 'USER', 9: 'FIELD', 10: 'INDEX',
+                                 12: 'USER_GROUP', 13: 'ROLE', 14: 'GENERATOR', 15: 'UDF',
+                                 16: 'BLOB_FILTER', 17: 'COLLATION'})
             self.assertDictEqual(s.enum_object_type_codes,
-                {'INDEX': 10, 'EXCEPTION': 7, 'GENERATOR': 14, 'COLLATION': 17,
-                'UDF': 15, 'EXPRESSION_INDEX': 6, 'FIELD': 9,
-                'COMPUTED_FIELD': 3, 'TRIGGER': 2, 'RELATION': 0, 'USER': 8,
-                'USER_GROUP': 12, 'BLOB_FILTER': 16, 'ROLE': 13,
-                'VALIDATION': 4, 'PROCEDURE': 5, 'VIEW': 1})
+                                 {'INDEX': 10, 'EXCEPTION': 7, 'GENERATOR': 14, 'COLLATION': 17,
+                                 'UDF': 15, 'EXPRESSION_INDEX': 6, 'FIELD': 9,
+                                 'COMPUTED_FIELD': 3, 'TRIGGER': 2, 'RELATION': 0, 'USER': 8,
+                                 'USER_GROUP': 12, 'BLOB_FILTER': 16, 'ROLE': 13,
+                                 'VALIDATION': 4, 'PROCEDURE': 5, 'VIEW': 1})
         else:
             self.assertDictEqual(s.enum_object_types,
-                {0: 'RELATION', 1: 'VIEW', 2: 'TRIGGER', 3: 'COMPUTED_FIELD',
-                4: 'VALIDATION', 5: 'PROCEDURE', 6: 'EXPRESSION_INDEX',
-                7: 'EXCEPTION', 8: 'USER', 9: 'FIELD', 10: 'INDEX',
-                11: 'CHARACTER_SET',12: 'USER_GROUP', 13: 'ROLE',
-                14: 'GENERATOR', 15: 'UDF', 16: 'BLOB_FILTER', 17: 'COLLATION',
-                18:'PACKAGE',19:'PACKAGE BODY'})
+                                 {0: 'RELATION', 1: 'VIEW', 2: 'TRIGGER', 3: 'COMPUTED_FIELD',
+                                 4: 'VALIDATION', 5: 'PROCEDURE', 6: 'EXPRESSION_INDEX',
+                                 7: 'EXCEPTION', 8: 'USER', 9: 'FIELD', 10: 'INDEX',
+                                 11: 'CHARACTER_SET', 12: 'USER_GROUP', 13: 'ROLE',
+                                 14: 'GENERATOR', 15: 'UDF', 16: 'BLOB_FILTER', 17: 'COLLATION',
+                                 18:'PACKAGE', 19:'PACKAGE BODY'})
             self.assertDictEqual(s.enum_object_type_codes,
-                {'INDEX': 10, 'EXCEPTION': 7, 'GENERATOR': 14, 'COLLATION': 17,
-                'UDF': 15, 'EXPRESSION_INDEX': 6, 'FIELD': 9,
-                'COMPUTED_FIELD': 3, 'TRIGGER': 2, 'RELATION': 0, 'USER': 8,
-                'USER_GROUP': 12, 'BLOB_FILTER': 16, 'ROLE': 13,
-                'VALIDATION': 4, 'PROCEDURE': 5, 'VIEW': 1, 'CHARACTER_SET':11,
-                'PACKAGE':18, 'PACKAGE BODY':19})
+                                 {'INDEX': 10, 'EXCEPTION': 7, 'GENERATOR': 14, 'COLLATION': 17,
+                                 'UDF': 15, 'EXPRESSION_INDEX': 6, 'FIELD': 9,
+                                 'COMPUTED_FIELD': 3, 'TRIGGER': 2, 'RELATION': 0, 'USER': 8,
+                                 'USER_GROUP': 12, 'BLOB_FILTER': 16, 'ROLE': 13,
+                                 'VALIDATION': 4, 'PROCEDURE': 5, 'VIEW': 1, 'CHARACTER_SET':11,
+                                 'PACKAGE':18, 'PACKAGE BODY':19})
         if self.con.ods <= fdb.ODS_FB_20:
             self.assertDictEqual(s.enum_character_set_names,
-                {0: 'NONE', 1: 'BINARY', 2: 'ASCII7', 3: 'SQL_TEXT', 4: 'UTF-8',
-                5: 'SJIS', 6: 'EUCJ', 9: 'DOS_737', 10: 'DOS_437',
-                11: 'DOS_850', 12: 'DOS_865', 13: 'DOS_860', 14: 'DOS_863',
-                15: 'DOS_775', 16: 'DOS_858', 17: 'DOS_862', 18: 'DOS_864',
-                19: 'NEXT', 21: 'ANSI', 22: 'ISO-8859-2', 23: 'ISO-8859-3',
-                34: 'ISO-8859-4', 35: 'ISO-8859-5', 36: 'ISO-8859-6',
-                37: 'ISO-8859-7', 38: 'ISO-8859-8', 39: 'ISO-8859-9',
-                40: 'ISO-8859-13', 44: 'WIN_949', 45: 'DOS_852', 46: 'DOS_857',
-                47: 'DOS_861', 48: 'DOS_866', 49: 'DOS_869', 50: 'CYRL',
-                51: 'WIN_1250', 52: 'WIN_1251', 53: 'WIN_1252', 54: 'WIN_1253',
-                55: 'WIN_1254', 56: 'WIN_950', 57: 'WIN_936', 58: 'WIN_1255',
-                59: 'WIN_1256', 60: 'WIN_1257', 63: 'KOI8R', 64: 'KOI8U',
-                65: 'WIN1258'})
+                                 {0: 'NONE', 1: 'BINARY', 2: 'ASCII7', 3: 'SQL_TEXT', 4: 'UTF-8',
+                                 5: 'SJIS', 6: 'EUCJ', 9: 'DOS_737', 10: 'DOS_437',
+                                 11: 'DOS_850', 12: 'DOS_865', 13: 'DOS_860', 14: 'DOS_863',
+                                 15: 'DOS_775', 16: 'DOS_858', 17: 'DOS_862', 18: 'DOS_864',
+                                 19: 'NEXT', 21: 'ANSI', 22: 'ISO-8859-2', 23: 'ISO-8859-3',
+                                 34: 'ISO-8859-4', 35: 'ISO-8859-5', 36: 'ISO-8859-6',
+                                 37: 'ISO-8859-7', 38: 'ISO-8859-8', 39: 'ISO-8859-9',
+                                 40: 'ISO-8859-13', 44: 'WIN_949', 45: 'DOS_852', 46: 'DOS_857',
+                                 47: 'DOS_861', 48: 'DOS_866', 49: 'DOS_869', 50: 'CYRL',
+                                 51: 'WIN_1250', 52: 'WIN_1251', 53: 'WIN_1252', 54: 'WIN_1253',
+                                 55: 'WIN_1254', 56: 'WIN_950', 57: 'WIN_936', 58: 'WIN_1255',
+                                 59: 'WIN_1256', 60: 'WIN_1257', 63: 'KOI8R', 64: 'KOI8U',
+                                 65: 'WIN1258'})
         elif self.con.ods == fdb.ODS_FB_21:
             self.assertDictEqual(s.enum_character_set_names,
-                {0: 'NONE', 1: 'BINARY', 2: 'ASCII7', 3: 'SQL_TEXT',
-                4: 'UTF-8', 5: 'SJIS', 6: 'EUCJ', 9: 'DOS_737', 10: 'DOS_437',
-                11: 'DOS_850', 12: 'DOS_865', 13: 'DOS_860', 14: 'DOS_863',
-                15: 'DOS_775', 16: 'DOS_858', 17: 'DOS_862', 18: 'DOS_864',
-                19: 'NEXT', 21: 'ANSI', 22: 'ISO-8859-2', 23: 'ISO-8859-3',
-                34: 'ISO-8859-4', 35: 'ISO-8859-5', 36: 'ISO-8859-6',
-                37: 'ISO-8859-7', 38: 'ISO-8859-8', 39: 'ISO-8859-9',
-                40: 'ISO-8859-13', 44: 'WIN_949', 45: 'DOS_852', 46: 'DOS_857',
-                47: 'DOS_861', 48: 'DOS_866', 49: 'DOS_869', 50: 'CYRL',
-                51: 'WIN_1250', 52: 'WIN_1251', 53: 'WIN_1252', 54: 'WIN_1253',
-                55: 'WIN_1254', 56: 'WIN_950', 57: 'WIN_936', 58: 'WIN_1255',
-                59: 'WIN_1256', 60: 'WIN_1257', 63: 'KOI8R', 64: 'KOI8U',
-                65: 'WIN1258', 66: 'TIS620', 67: 'GBK', 68: 'CP943C'})
+                                 {0: 'NONE', 1: 'BINARY', 2: 'ASCII7', 3: 'SQL_TEXT',
+                                 4: 'UTF-8', 5: 'SJIS', 6: 'EUCJ', 9: 'DOS_737', 10: 'DOS_437',
+                                 11: 'DOS_850', 12: 'DOS_865', 13: 'DOS_860', 14: 'DOS_863',
+                                 15: 'DOS_775', 16: 'DOS_858', 17: 'DOS_862', 18: 'DOS_864',
+                                 19: 'NEXT', 21: 'ANSI', 22: 'ISO-8859-2', 23: 'ISO-8859-3',
+                                 34: 'ISO-8859-4', 35: 'ISO-8859-5', 36: 'ISO-8859-6',
+                                 37: 'ISO-8859-7', 38: 'ISO-8859-8', 39: 'ISO-8859-9',
+                                 40: 'ISO-8859-13', 44: 'WIN_949', 45: 'DOS_852', 46: 'DOS_857',
+                                 47: 'DOS_861', 48: 'DOS_866', 49: 'DOS_869', 50: 'CYRL',
+                                 51: 'WIN_1250', 52: 'WIN_1251', 53: 'WIN_1252', 54: 'WIN_1253',
+                                 55: 'WIN_1254', 56: 'WIN_950', 57: 'WIN_936', 58: 'WIN_1255',
+                                 59: 'WIN_1256', 60: 'WIN_1257', 63: 'KOI8R', 64: 'KOI8U',
+                                 65: 'WIN1258', 66: 'TIS620', 67: 'GBK', 68: 'CP943C'})
         elif self.con.ods >= fdb.ODS_FB_25:
             self.assertDictEqual(s.enum_character_set_names,
-                {0: 'NONE', 1: 'BINARY', 2: 'ASCII7', 3: 'SQL_TEXT', 4: 'UTF-8',
-                5: 'SJIS', 6: 'EUCJ', 9: 'DOS_737', 10: 'DOS_437', 11: 'DOS_850',
-                12: 'DOS_865', 13: 'DOS_860', 14: 'DOS_863', 15: 'DOS_775',
-                16: 'DOS_858', 17: 'DOS_862', 18: 'DOS_864', 19: 'NEXT',
-                21: 'ANSI', 22: 'ISO-8859-2', 23: 'ISO-8859-3', 34: 'ISO-8859-4',
-                35: 'ISO-8859-5', 36: 'ISO-8859-6', 37: 'ISO-8859-7',
-                38: 'ISO-8859-8', 39: 'ISO-8859-9', 40: 'ISO-8859-13',
-                44: 'WIN_949', 45: 'DOS_852', 46: 'DOS_857', 47: 'DOS_861',
-                48: 'DOS_866', 49: 'DOS_869', 50: 'CYRL', 51: 'WIN_1250',
-                52: 'WIN_1251', 53: 'WIN_1252', 54: 'WIN_1253', 55: 'WIN_1254',
-                56: 'WIN_950', 57: 'WIN_936', 58: 'WIN_1255', 59: 'WIN_1256',
-                60: 'WIN_1257', 63: 'KOI8R', 64: 'KOI8U', 65: 'WIN_1258',
-                66: 'TIS620', 67: 'GBK', 68: 'CP943C', 69: 'GB18030'})
+                                 {0: 'NONE', 1: 'BINARY', 2: 'ASCII7', 3: 'SQL_TEXT', 4: 'UTF-8',
+                                 5: 'SJIS', 6: 'EUCJ', 9: 'DOS_737', 10: 'DOS_437', 11: 'DOS_850',
+                                 12: 'DOS_865', 13: 'DOS_860', 14: 'DOS_863', 15: 'DOS_775',
+                                 16: 'DOS_858', 17: 'DOS_862', 18: 'DOS_864', 19: 'NEXT',
+                                 21: 'ANSI', 22: 'ISO-8859-2', 23: 'ISO-8859-3', 34: 'ISO-8859-4',
+                                 35: 'ISO-8859-5', 36: 'ISO-8859-6', 37: 'ISO-8859-7',
+                                 38: 'ISO-8859-8', 39: 'ISO-8859-9', 40: 'ISO-8859-13',
+                                 44: 'WIN_949', 45: 'DOS_852', 46: 'DOS_857', 47: 'DOS_861',
+                                 48: 'DOS_866', 49: 'DOS_869', 50: 'CYRL', 51: 'WIN_1250',
+                                 52: 'WIN_1251', 53: 'WIN_1252', 54: 'WIN_1253', 55: 'WIN_1254',
+                                 56: 'WIN_950', 57: 'WIN_936', 58: 'WIN_1255', 59: 'WIN_1256',
+                                 60: 'WIN_1257', 63: 'KOI8R', 64: 'KOI8U', 65: 'WIN_1258',
+                                 66: 'TIS620', 67: 'GBK', 68: 'CP943C', 69: 'GB18030'})
         if self.con.ods < fdb.ODS_FB_30:
             self.assertDictEqual(s.enum_field_types,
-                {35: 'TIMESTAMP', 37: 'VARYING', 7: 'SHORT', 8: 'LONG',
-                9: 'QUAD', 10: 'FLOAT', 12: 'DATE', 45: 'BLOB_ID', 14: 'TEXT',
-                13: 'TIME', 16: 'INT64', 40: 'CSTRING', 27: 'DOUBLE',
-                261: 'BLOB'})
+                                 {35: 'TIMESTAMP', 37: 'VARYING', 7: 'SHORT', 8: 'LONG',
+                                 9: 'QUAD', 10: 'FLOAT', 12: 'DATE', 45: 'BLOB_ID', 14: 'TEXT',
+                                 13: 'TIME', 16: 'INT64', 40: 'CSTRING', 27: 'DOUBLE',
+                                 261: 'BLOB'})
        else:
             self.assertDictEqual(s.enum_field_types,
-                {35: 'TIMESTAMP', 37: 'VARYING', 7: 'SHORT', 8: 'LONG',
-                9: 'QUAD', 10: 'FLOAT', 12: 'DATE', 45: 'BLOB_ID', 14: 'TEXT',
-                13: 'TIME', 16: 'INT64', 40: 'CSTRING', 27: 'DOUBLE',
-                261: 'BLOB', 23:'BOOLEAN'})
+                                 {35: 'TIMESTAMP', 37: 'VARYING', 7: 'SHORT', 8: 'LONG',
+                                 9: 'QUAD', 10: 'FLOAT', 12: 'DATE', 45: 'BLOB_ID', 14: 'TEXT',
+                                 13: 'TIME', 16: 'INT64', 40: 'CSTRING', 27: 'DOUBLE',
+                                 261: 'BLOB', 23:'BOOLEAN'})
         if self.con.ods <= fdb.ODS_FB_20:
             self.assertDictEqual(s.enum_field_subtypes,
-                {0: 'BINARY', 1: 'TEXT', 2: 'BLR', 3: 'ACL', 4: 'RANGES',
-                5: 'SUMMARY', 6: 'FORMAT', 7: 'TRANSACTION_DESCRIPTION',
-                8: 'EXTERNAL_FILE_DESCRIPTION'})
+                                 {0: 'BINARY', 1: 'TEXT', 2: 'BLR', 3: 'ACL', 4: 'RANGES',
+                                 5: 'SUMMARY', 6: 'FORMAT', 7: 'TRANSACTION_DESCRIPTION',
+                                 8: 'EXTERNAL_FILE_DESCRIPTION'})
         elif self.con.ods > fdb.ODS_FB_20:
             self.assertDictEqual(s.enum_field_subtypes,
-                {0: 'BINARY', 1: 'TEXT', 2: 'BLR', 3: 'ACL', 4: 'RANGES',
-                5: 'SUMMARY', 6: 'FORMAT', 7: 'TRANSACTION_DESCRIPTION',
-                8: 'EXTERNAL_FILE_DESCRIPTION', 9: 'DEBUG_INFORMATION'})
-        self.assertDictEqual(s.enum_function_types,{0: 'VALUE', 1: 'BOOLEAN'})
+                                 {0: 'BINARY', 1: 'TEXT', 2: 'BLR', 3: 'ACL', 4: 'RANGES',
+                                 5: 'SUMMARY', 6: 'FORMAT', 7: 'TRANSACTION_DESCRIPTION',
+                                 8: 'EXTERNAL_FILE_DESCRIPTION', 9: 'DEBUG_INFORMATION'})
+        self.assertDictEqual(s.enum_function_types, {0: 'VALUE', 1: 'BOOLEAN'})
         self.assertDictEqual(s.enum_mechanism_types,
                              {0: 'BY_VALUE', 1: 'BY_REFERENCE', 2: 'BY_VMS_DESCRIPTOR',
                              3: 'BY_ISC_DESCRIPTOR', 4: 'BY_SCALAR_ARRAY_DESCRIPTOR',
                              5: 'BY_REFERENCE_WITH_NULL'})
         if self.con.ods <= fdb.ODS_FB_20:
-            self.assertDictEqual(s.enum_parameter_mechanism_types,{})
+            self.assertDictEqual(s.enum_parameter_mechanism_types, {})
         elif self.con.ods > fdb.ODS_FB_20:
             self.assertDictEqual(s.enum_parameter_mechanism_types,
                                  {0: 'NORMAL', 1: 'TYPE OF'})
         self.assertDictEqual(s.enum_procedure_types,
                              {0: 'LEGACY', 1: 'SELECTABLE', 2: 'EXECUTABLE'})
         if self.con.ods <= fdb.ODS_FB_20:
-            self.assertDictEqual(s.enum_relation_types,{})
+            self.assertDictEqual(s.enum_relation_types, {})
         elif self.con.ods > fdb.ODS_FB_20:
             self.assertDictEqual(s.enum_relation_types,
-                {0: 'PERSISTENT', 1: 'VIEW', 2: 'EXTERNAL', 3: 'VIRTUAL',
-                4: 'GLOBAL_TEMPORARY_PRESERVE', 5: 'GLOBAL_TEMPORARY_DELETE'})
+                                 {0: 'PERSISTENT', 1: 'VIEW', 2: 'EXTERNAL', 3: 'VIRTUAL',
+                                 4: 'GLOBAL_TEMPORARY_PRESERVE', 5: 'GLOBAL_TEMPORARY_DELETE'})
         if self.con.ods < fdb.ODS_FB_30:
             self.assertDictEqual(s.enum_system_flag_types,
-                {0: 'USER', 1: 'SYSTEM', 2: 'QLI', 3: 'CHECK_CONSTRAINT',
-                4: 'REFERENTIAL_CONSTRAINT', 5: 'VIEW_CHECK'})
+                                 {0: 'USER', 1: 'SYSTEM', 2: 'QLI', 3: 'CHECK_CONSTRAINT',
+                                 4: 'REFERENTIAL_CONSTRAINT', 5: 'VIEW_CHECK'})
         else:
             self.assertDictEqual(s.enum_system_flag_types,
-                {0: 'USER', 1: 'SYSTEM', 2: 'QLI', 3: 'CHECK_CONSTRAINT',
-                4: 'REFERENTIAL_CONSTRAINT', 5: 'VIEW_CHECK'})
+                                 {0: 'USER', 1: 'SYSTEM', 2: 'QLI', 3: 'CHECK_CONSTRAINT',
+                                 4: 'REFERENTIAL_CONSTRAINT', 5: 'VIEW_CHECK', 6: 'IDENTITY_GENERATOR'})
         self.assertDictEqual(s.enum_transaction_state_types,
                              {1: 'LIMBO', 2: 'COMMITTED', 3: 'ROLLED_BACK'})
         if self.con.ods <= fdb.ODS_FB_20:
             self.assertDictEqual(s.enum_trigger_types,
-                {1: 'PRE_STORE', 2: 'POST_STORE', 3: 'PRE_MODIFY',
-                4: 'POST_MODIFY', 5: 'PRE_ERASE', 6: 'POST_ERASE'})
+                                 {1: 'PRE_STORE', 2: 'POST_STORE', 3: 'PRE_MODIFY',
+                                 4: 'POST_MODIFY', 5: 'PRE_ERASE', 6: 'POST_ERASE'})
         elif self.con.ods > fdb.ODS_FB_20:
             self.assertDictEqual(s.enum_trigger_types,
-                {8192: 'CONNECT', 1: 'PRE_STORE', 2: 'POST_STORE',
-                3: 'PRE_MODIFY', 4: 'POST_MODIFY', 5: 'PRE_ERASE',
-                6: 'POST_ERASE', 8193: 'DISCONNECT', 8194: 'TRANSACTION_START',
-                8195: 'TRANSACTION_COMMIT', 8196: 'TRANSACTION_ROLLBACK'})
+                                 {8192: 'CONNECT', 1: 'PRE_STORE', 2: 'POST_STORE',
+                                 3: 'PRE_MODIFY', 4: 'POST_MODIFY', 5: 'PRE_ERASE',
+                                 6: 'POST_ERASE', 8193: 'DISCONNECT', 8194: 'TRANSACTION_START',
+                                 8195: 'TRANSACTION_COMMIT', 8196: 'TRANSACTION_ROLLBACK'})
         if self.con.ods >= fdb.ODS_FB_30:
             self.assertDictEqual(s.enum_parameter_types,
-                {0: 'INPUT', 1: 'OUTPUT'})
+                                 {0: 'INPUT', 1: 'OUTPUT'})
             self.assertDictEqual(s.enum_index_activity_flags,
-                {0: 'ACTIVE', 1: 'INACTIVE'})
+                                 {0: 'ACTIVE', 1: 'INACTIVE'})
             self.assertDictEqual(s.enum_index_unique_flags,
-                {0: 'NON_UNIQUE', 1: 'UNIQUE'})
+                                 {0: 'NON_UNIQUE', 1: 'UNIQUE'})
             self.assertDictEqual(s.enum_trigger_activity_flags,
-                {0: 'ACTIVE', 1: 'INACTIVE'})
+                                 {0: 'ACTIVE', 1: 'INACTIVE'})
             self.assertDictEqual(s.enum_grant_options,
-                {0: 'NONE', 1: 'GRANT_OPTION', 2: 'ADMIN_OPTION'})
+                                 {0: 'NONE', 1: 'GRANT_OPTION', 2: 'ADMIN_OPTION'})
             self.assertDictEqual(s.enum_page_types,
-                {1: 'HEADER', 2: 'PAGE_INVENTORY', 3: 'TRANSACTION_INVENTORY',
-                4: 'POINTER', 5: 'DATA', 6: 'INDEX_ROOT', 7: 'INDEX_BUCKET',
-                8: 'BLOB', 9: 'GENERATOR', 10: 'SCN_INVENTORY'})
+                                 {1: 'HEADER', 2: 'PAGE_INVENTORY', 3: 'TRANSACTION_INVENTORY',
+                                 4: 'POINTER', 5: 'DATA', 6: 'INDEX_ROOT', 7: 'INDEX_BUCKET',
+                                 8: 'BLOB', 9: 'GENERATOR', 10: 'SCN_INVENTORY'})
             self.assertDictEqual(s.enum_privacy_flags,
-                {0: 'PUBLIC', 1: 'PRIVATE'})
+                                 {0: 'PUBLIC', 1: 'PRIVATE'})
             self.assertDictEqual(s.enum_legacy_flags,
-                {0: 'NEW_STYLE', 1: 'LEGACY_STYLE'})
+                                 {0: 'NEW_STYLE', 1: 'LEGACY_STYLE'})
             self.assertDictEqual(s.enum_determinism_flags,
-                {0: 'NON_DETERMINISTIC', 1: 'DETERMINISTIC'})
+                                 {0: 'NON_DETERMINISTIC', 1: 'DETERMINISTIC'})
         # properties
         self.assertIsNone(s.description)
-        self.assertEqual(s.owner_name,'SYSDBA')
-        self.assertEqual(s.default_character_set.name,'NONE')
+        self.assertEqual(s.owner_name, 'SYSDBA')
+        self.assertEqual(s.default_character_set.name, 'NONE')
         if self.con.ods < fdb.ODS_FB_30:
             self.assertIsNone(s.security_class)
         else:
-            self.assertEqual(s.security_class,'SQL$363')
+            self.assertEqual(s.security_class, 'SQL$363')
         # Lists of db objects
-        self.assertIsInstance(s.collations,list)
-        self.assertIsInstance(s.character_sets,list)
-        self.assertIsInstance(s.exceptions,list)
-        self.assertIsInstance(s.generators,list)
-        self.assertIsInstance(s.sysgenerators,list)
-        self.assertIsInstance(s.sequences,list)
-        self.assertIsInstance(s.syssequences,list)
-        self.assertIsInstance(s.domains,list)
-        self.assertIsInstance(s.sysdomains,list)
-        self.assertIsInstance(s.indices,list)
-        self.assertIsInstance(s.sysindices,list)
-        self.assertIsInstance(s.tables,list)
-        self.assertIsInstance(s.systables,list)
-        self.assertIsInstance(s.views,list)
-        self.assertIsInstance(s.sysviews,list)
-        self.assertIsInstance(s.triggers,list)
-        self.assertIsInstance(s.systriggers,list)
-        self.assertIsInstance(s.procedures,list)
-        self.assertIsInstance(s.sysprocedures,list)
-        self.assertIsInstance(s.constraints,list)
-        self.assertIsInstance(s.roles,list)
-        self.assertIsInstance(s.dependencies,list)
-        self.assertIsInstance(s.functions,list)
-        self.assertIsInstance(s.files,list)
+        self.assertIsInstance(s.collations, list)
+        self.assertIsInstance(s.character_sets, list)
+        self.assertIsInstance(s.exceptions, list)
+        self.assertIsInstance(s.generators, list)
+        self.assertIsInstance(s.sysgenerators, list)
+        self.assertIsInstance(s.sequences, list)
+        self.assertIsInstance(s.syssequences, list)
+        self.assertIsInstance(s.domains, list)
+        self.assertIsInstance(s.sysdomains, list)
+        self.assertIsInstance(s.indices, list)
+        self.assertIsInstance(s.sysindices, list)
+        self.assertIsInstance(s.tables, list)
+        self.assertIsInstance(s.systables, list)
+        self.assertIsInstance(s.views, list)
+        self.assertIsInstance(s.sysviews, list)
+        self.assertIsInstance(s.triggers, list)
+        self.assertIsInstance(s.systriggers, list)
+        self.assertIsInstance(s.procedures, list)
+        self.assertIsInstance(s.sysprocedures, list)
+        self.assertIsInstance(s.constraints, list)
+        self.assertIsInstance(s.roles, list)
+        self.assertIsInstance(s.dependencies, list)
+        self.assertIsInstance(s.functions, list)
+        self.assertIsInstance(s.files, list)
         s.reload()
         if self.con.ods <= fdb.ODS_FB_20:
-            self.assertEqual(len(s.collations),138)
+            self.assertEqual(len(s.collations), 138)
         elif self.con.ods == fdb.ODS_FB_21:
-            self.assertEqual(len(s.collations),146)
+            self.assertEqual(len(s.collations), 146)
         elif self.con.ods == fdb.ODS_FB_25:
-            self.assertEqual(len(s.collations),149)
+            self.assertEqual(len(s.collations), 149)
         elif self.con.ods >= fdb.ODS_FB_30:
-            self.assertEqual(len(s.collations),150)
+            self.assertEqual(len(s.collations), 150)
         if self.con.ods <= fdb.ODS_FB_20:
-            self.assertEqual(len(s.character_sets),48)
+            self.assertEqual(len(s.character_sets), 48)
         elif self.con.ods == fdb.ODS_FB_21:
-            self.assertEqual(len(s.character_sets),51)
+            self.assertEqual(len(s.character_sets), 51)
         elif self.con.ods >= fdb.ODS_FB_25:
-            self.assertEqual(len(s.character_sets),52)
-        self.assertEqual(len(s.exceptions),5)
-        self.assertEqual(len(s.generators),2)
+            self.assertEqual(len(s.character_sets), 52)
+        self.assertEqual(len(s.exceptions), 5)
+        self.assertEqual(len(s.generators), 2)
         if self.con.ods < fdb.ODS_FB_30:
-            self.assertEqual(len(s.sysgenerators),9)
-            self.assertEqual(len(s.syssequences),9)
+            self.assertEqual(len(s.sysgenerators), 9)
+            self.assertEqual(len(s.syssequences), 9)
         else:
-            self.assertEqual(len(s.sysgenerators),13)
-            self.assertEqual(len(s.syssequences),13)
-        self.assertEqual(len(s.sequences),2)
-        self.assertEqual(len(s.domains),15)
+            self.assertEqual(len(s.sysgenerators), 13)
+            self.assertEqual(len(s.syssequences), 13)
+        self.assertEqual(len(s.sequences), 2)
+        self.assertEqual(len(s.domains), 15)
         if self.con.ods <= fdb.ODS_FB_20:
-            self.assertEqual(len(s.sysdomains),203)
+            self.assertEqual(len(s.sysdomains), 203)
         elif self.con.ods == fdb.ODS_FB_21:
-            self.assertEqual(len(s.sysdomains),227)
+            self.assertEqual(len(s.sysdomains), 227)
         elif self.con.ods == fdb.ODS_FB_25:
-            self.assertEqual(len(s.sysdomains),230)
+            self.assertEqual(len(s.sysdomains), 230)
         elif self.con.ods == fdb.ODS_FB_30:
-            self.assertEqual(len(s.sysdomains),275)
+            self.assertEqual(len(s.sysdomains), 277)
         else:
-            self.assertEqual(len(s.sysdomains),247)
-        self.assertEqual(len(s.indices),12)
+            self.assertEqual(len(s.sysdomains), 247)
+        self.assertEqual(len(s.indices), 12)
         if self.con.ods <= fdb.ODS_FB_21:
-            self.assertEqual(len(s.sysindices),72)
+            self.assertEqual(len(s.sysindices), 72)
         elif self.con.ods == fdb.ODS_FB_25:
-            self.assertEqual(len(s.sysindices),75)
+            self.assertEqual(len(s.sysindices), 76)
         elif self.con.ods == fdb.ODS_FB_30:
-            self.assertEqual(len(s.sysindices),81)
+            self.assertEqual(len(s.sysindices), 82)
         else:
-            self.assertEqual(len(s.sysindices),78)
+            self.assertEqual(len(s.sysindices), 78)
         if self.con.ods < fdb.ODS_FB_30:
-            self.assertEqual(len(s.tables),15)
+            self.assertEqual(len(s.tables), 15)
         else:
-            self.assertEqual(len(s.tables),16)
+            self.assertEqual(len(s.tables), 16)
         if self.con.ods <= fdb.ODS_FB_20:
-            self.assertEqual(len(s.systables),33)
+            self.assertEqual(len(s.systables), 33)
         elif self.con.ods == fdb.ODS_FB_21:
-            self.assertEqual(len(s.systables),40)
+            self.assertEqual(len(s.systables), 40)
         elif self.con.ods == fdb.ODS_FB_25:
-            self.assertEqual(len(s.systables),42)
+            self.assertEqual(len(s.systables), 42)
         elif self.con.ods == fdb.ODS_FB_30:
-            self.assertEqual(len(s.systables),50)
+            self.assertEqual(len(s.systables), 50)
         else:
-            self.assertEqual(len(s.systables),44)
-        self.assertEqual(len(s.views),1)
-        self.assertEqual(len(s.sysviews),0)
-        self.assertEqual(len(s.triggers),6)
+            self.assertEqual(len(s.systables), 44)
+        self.assertEqual(len(s.views), 1)
+        self.assertEqual(len(s.sysviews), 0)
+        self.assertEqual(len(s.triggers), 6)
         if self.con.ods < fdb.ODS_FB_30:
-            self.assertEqual(len(s.systriggers),63)
+            self.assertEqual(len(s.systriggers), 63)
         else:
-            self.assertEqual(len(s.systriggers),57)
+            self.assertEqual(len(s.systriggers), 57)
         if self.con.ods < fdb.ODS_FB_30:
-            self.assertEqual(len(s.procedures),10)
+            self.assertEqual(len(s.procedures), 10)
         else:
-            self.assertEqual(len(s.procedures),11)
-        self.assertEqual(len(s.sysprocedures),0)
+            self.assertEqual(len(s.procedures), 11)
+        self.assertEqual(len(s.sysprocedures), 0)
         if self.con.ods < fdb.ODS_FB_30:
-            self.assertEqual(len(s.constraints),80)
+            self.assertEqual(len(s.constraints), 82)
         else:
-            self.assertEqual(len(s.constraints),108)
+            self.assertEqual(len(s.constraints), 110)
         if self.con.ods <= fdb.ODS_FB_21:
-            self.assertEqual(len(s.roles),1)
+            self.assertEqual(len(s.roles), 1)
         elif self.con.ods >= fdb.ODS_FB_25:
-            self.assertEqual(len(s.roles),2)
+            self.assertEqual(len(s.roles), 2)
         if self.con.ods <= fdb.ODS_FB_20:
-            self.assertEqual(len(s.dependencies),163)
+            self.assertEqual(len(s.dependencies), 163)
         elif self.con.ods == fdb.ODS_FB_21:
-            self.assertEqual(len(s.dependencies),157)
+            self.assertEqual(len(s.dependencies), 157)
         elif self.con.ods == fdb.ODS_FB_25:
-            self.assertEqual(len(s.dependencies),163)
+            self.assertEqual(len(s.dependencies), 163)
         elif self.con.ods >= fdb.ODS_FB_30:
-            self.assertEqual(len(s.dependencies),168)
+            self.assertEqual(len(s.dependencies), 168)
         if self.con.ods < fdb.ODS_FB_30:
-            self.assertEqual(len(s.functions),0)
+            self.assertEqual(len(s.functions), 0)
         else:
-            self.assertEqual(len(s.functions),6)
+            self.assertEqual(len(s.functions), 6)
         if self.con.ods < fdb.ODS_FB_30:
-            self.assertEqual(len(s.sysfunctions),2)
+            self.assertEqual(len(s.sysfunctions), 2)
         else:
-            self.assertEqual(len(s.sysfunctions),0)
-        self.assertEqual(len(s.files),0)
+            self.assertEqual(len(s.sysfunctions), 0)
+        self.assertEqual(len(s.files), 0)
         #
-        self.assertIsInstance(s.collations[0],sm.Collation)
-        self.assertIsInstance(s.character_sets[0],sm.CharacterSet)
-        self.assertIsInstance(s.exceptions[0],sm.DatabaseException)
-        self.assertIsInstance(s.generators[0],sm.Sequence)
-        self.assertIsInstance(s.sysgenerators[0],sm.Sequence)
-        self.assertIsInstance(s.sequences[0],sm.Sequence)
-        self.assertIsInstance(s.syssequences[0],sm.Sequence)
-        self.assertIsInstance(s.domains[0],sm.Domain)
-        self.assertIsInstance(s.sysdomains[0],sm.Domain)
-        self.assertIsInstance(s.indices[0],sm.Index)
-        self.assertIsInstance(s.sysindices[0],sm.Index)
-        self.assertIsInstance(s.tables[0],sm.Table)
-        self.assertIsInstance(s.systables[0],sm.Table)
-        self.assertIsInstance(s.views[0],sm.View)
-        #self.assertIsInstance(s.sysviews[0],sm.View)
-        self.assertIsInstance(s.triggers[0],sm.Trigger)
-        self.assertIsInstance(s.systriggers[0],sm.Trigger)
-        self.assertIsInstance(s.procedures[0],sm.Procedure)
-        #self.assertIsInstance(s.sysprocedures[0],sm.Procedure)
-        self.assertIsInstance(s.constraints[0],sm.Constraint)
-        #self.assertIsInstance(s.roles[0],sm.Role)
-        self.assertIsInstance(s.dependencies[0],sm.Dependency)
+        self.assertIsInstance(s.collations[0], sm.Collation)
+        self.assertIsInstance(s.character_sets[0], sm.CharacterSet)
+        self.assertIsInstance(s.exceptions[0], sm.DatabaseException)
+        self.assertIsInstance(s.generators[0], sm.Sequence)
+        self.assertIsInstance(s.sysgenerators[0], sm.Sequence)
+        self.assertIsInstance(s.sequences[0], sm.Sequence)
+        self.assertIsInstance(s.syssequences[0], sm.Sequence)
+        self.assertIsInstance(s.domains[0], sm.Domain)
+        self.assertIsInstance(s.sysdomains[0], sm.Domain)
+        self.assertIsInstance(s.indices[0], sm.Index)
+        self.assertIsInstance(s.sysindices[0], sm.Index)
+        self.assertIsInstance(s.tables[0], sm.Table)
+        self.assertIsInstance(s.systables[0], sm.Table)
+        self.assertIsInstance(s.views[0], sm.View)
+        if len(s.sysviews) > 0:
+            self.assertIsInstance(s.sysviews[0],sm.View)
+        self.assertIsInstance(s.triggers[0], sm.Trigger)
+        self.assertIsInstance(s.systriggers[0], sm.Trigger)
+        self.assertIsInstance(s.procedures[0], sm.Procedure)
+        if len(s.sysprocedures) > 0:
+            self.assertIsInstance(s.sysprocedures[0],sm.Procedure)
+        self.assertIsInstance(s.constraints[0], sm.Constraint)
+        if len(s.roles) > 0:
+            self.assertIsInstance(s.roles[0],sm.Role)
+        self.assertIsInstance(s.dependencies[0], sm.Dependency)
         if self.con.ods < fdb.ODS_FB_30:
-            self.assertIsInstance(s.sysfunctions[0],sm.Function)
-        #self.assertIsInstance(s.files[0],sm.DatabaseFile)
-        #
-        self.assertEqual(s.get_collation('OCTETS').name,'OCTETS')
-        self.assertEqual(s.get_character_set('WIN1250').name,'WIN1250')
-        self.assertEqual(s.get_exception('UNKNOWN_EMP_ID').name,'UNKNOWN_EMP_ID')
-        self.assertEqual(s.get_generator('EMP_NO_GEN').name,'EMP_NO_GEN')
-        self.assertEqual(s.get_sequence('EMP_NO_GEN').name,'EMP_NO_GEN')
-        self.assertEqual(s.get_index('MINSALX').name,'MINSALX')
-        self.assertEqual(s.get_domain('FIRSTNAME').name,'FIRSTNAME')
-        self.assertEqual(s.get_table('COUNTRY').name,'COUNTRY')
-        self.assertEqual(s.get_view('PHONE_LIST').name,'PHONE_LIST')
-        self.assertEqual(s.get_trigger('SET_EMP_NO').name,'SET_EMP_NO')
-        self.assertEqual(s.get_procedure('GET_EMP_PROJ').name,'GET_EMP_PROJ')
-        self.assertEqual(s.get_constraint('INTEG_1').name,'INTEG_1')
+            self.assertIsInstance(s.sysfunctions[0], sm.Function)
+        if len(s.files) > 0:
+            self.assertIsInstance(s.files[0],sm.DatabaseFile)
+        #
+        self.assertEqual(s.get_collation('OCTETS').name, 'OCTETS')
+        self.assertEqual(s.get_character_set('WIN1250').name, 'WIN1250')
+        self.assertEqual(s.get_exception('UNKNOWN_EMP_ID').name, 'UNKNOWN_EMP_ID')
+        self.assertEqual(s.get_generator('EMP_NO_GEN').name, 'EMP_NO_GEN')
+        self.assertEqual(s.get_sequence('EMP_NO_GEN').name, 'EMP_NO_GEN')
+        self.assertEqual(s.get_index('MINSALX').name, 'MINSALX')
+        self.assertEqual(s.get_domain('FIRSTNAME').name, 'FIRSTNAME')
+        self.assertEqual(s.get_table('COUNTRY').name, 'COUNTRY')
+        self.assertEqual(s.get_view('PHONE_LIST').name, 'PHONE_LIST')
+        self.assertEqual(s.get_trigger('SET_EMP_NO').name, 'SET_EMP_NO')
+        self.assertEqual(s.get_procedure('GET_EMP_PROJ').name, 'GET_EMP_PROJ')
+        self.assertEqual(s.get_constraint('INTEG_1').name, 'INTEG_1')
         #self.assertEqual(s.get_role('X').name,'X')
         if self.con.ods < fdb.ODS_FB_30:
-            self.assertEqual(s.get_function('RDB$GET_CONTEXT').name,'RDB$GET_CONTEXT')
-        self.assertEqual(s.get_collation_by_id(0,0).name,'NONE')
-        self.assertEqual(s.get_character_set_by_id(0).name,'NONE')
+            self.assertEqual(s.get_function('RDB$GET_CONTEXT').name, 'RDB$GET_CONTEXT')
+        self.assertEqual(s.get_collation_by_id(0, 0).name, 'NONE')
+        self.assertEqual(s.get_character_set_by_id(0).name, 'NONE')
         self.assertFalse(s.ismultifile())
         #
         self.assertFalse(s.closed)
@@ -2589,24 +2662,24 @@
         # System collation
         c = self.con.schema.get_collation('ES_ES')
         # common properties
-        self.assertEqual(c.name,'ES_ES')
+        self.assertEqual(c.name, 'ES_ES')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,[])
+        self.assertListEqual(c.actions, ['comment'])
         self.assertTrue(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'ES_ES')
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
+        self.assertEqual(c.get_quoted_name(), 'ES_ES')
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
         if self.con.ods < fdb.ODS_FB_30:
             self.assertIsNone(c.security_class)
             self.assertIsNone(c.owner_name)
         else:
-            self.assertEqual(c.security_class,'SQL$263')
-            self.assertEqual(c.owner_name,'SYSDBA')
+            self.assertEqual(c.security_class, 'SQL$263')
+            self.assertEqual(c.owner_name, 'SYSDBA')
         #
-        self.assertEqual(c.id,10)
-        self.assertEqual(c.character_set.name,'ISO8859_1')
+        self.assertEqual(c.id, 10)
+        self.assertEqual(c.character_set.name, 'ISO8859_1')
         self.assertIsNone(c.base_collation)
-        self.assertEqual(c.attributes,1)
+        self.assertEqual(c.attributes, 1)
         if self.con.ods <= fdb.ODS_FB_20:
             self.assertIsNone(c.specific_attributes)
         elif self.con.ods > fdb.ODS_FB_20:
@@ -2620,55 +2693,57 @@
         # 'DISABLE-COMPRESSIONS=0;DISABLE-EXPANSIONS=0'
         c = self.con.schema.get_collation('TEST_COLLATE')
         # common properties
-        self.assertEqual(c.name,'TEST_COLLATE')
+        self.assertEqual(c.name, 'TEST_COLLATE')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,['create', 'drop'])
+        self.assertListEqual(c.actions, ['comment', 'create', 'drop'])
         self.assertFalse(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'TEST_COLLATE')
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
-        #
-        self.assertEqual(c.id,126)
-        self.assertEqual(c.character_set.name,'WIN1250')
-        self.assertEqual(c.base_collation.name,'WIN_CZ')
-        self.assertEqual(c.attributes,6)
+        self.assertEqual(c.get_quoted_name(), 'TEST_COLLATE')
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
+        #
+        self.assertEqual(c.id, 126)
+        self.assertEqual(c.character_set.name, 'WIN1250')
+        self.assertEqual(c.base_collation.name, 'WIN_CZ')
+        self.assertEqual(c.attributes, 6)
         self.assertEqual(c.specific_attributes,
                          'DISABLE-COMPRESSIONS=0;DISABLE-EXPANSIONS=0')
         self.assertIsNone(c.function_name)
         self.assertEqual(c.get_sql_for('create'),
-"""CREATE COLLATION TEST_COLLATE
+                         """CREATE COLLATION TEST_COLLATE
 FOR WIN1250
 FROM WIN_CZ
 NO PAD
 CASE INSENSITIVE
 ACCENT INSENSITIVE
 'DISABLE-COMPRESSIONS=0;DISABLE-EXPANSIONS=0'""")
-        self.assertEqual(c.get_sql_for('drop'),"DROP COLLATION TEST_COLLATE")
+        self.assertEqual(c.get_sql_for('drop'), "DROP COLLATION TEST_COLLATE")
         with self.assertRaises(fdb.ProgrammingError) as cm:
-            c.get_sql_for('drop',badparam='')
+            c.get_sql_for('drop', badparam='')
         self.assertTupleEqual(cm.exception.args,
-            ("Unsupported parameter(s) 'badparam'",))
+                              ("Unsupported parameter(s) 'badparam'",))
+        self.assertEqual(c.get_sql_for('comment'),
+                         "COMMENT ON COLLATION TEST_COLLATE IS NULL")

     def testCharacterSet(self):
         c = self.con.schema.get_character_set('UTF8')
         # common properties
-        self.assertEqual(c.name,'UTF8')
+        self.assertEqual(c.name, 'UTF8')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,['alter'])
+        self.assertListEqual(c.actions, ['alter', 'comment'])
         self.assertTrue(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'UTF8')
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
+        self.assertEqual(c.get_quoted_name(), 'UTF8')
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
         if self.con.ods < fdb.ODS_FB_30:
             self.assertIsNone(c.security_class)
             self.assertIsNone(c.owner_name)
         else:
-            self.assertEqual(c.security_class,'SQL$166')
-            self.assertEqual(c.owner_name,'SYSDBA')
+            self.assertEqual(c.security_class, 'SQL$166')
+            self.assertEqual(c.owner_name, 'SYSDBA')
         #
-        self.assertEqual(c.id,4)
-        self.assertEqual(c.bytes_per_character,4)
-        self.assertEqual(c.default_collate.name,'UTF8')
+        self.assertEqual(c.id, 4)
+        self.assertEqual(c.bytes_per_character, 4)
+        self.assertEqual(c.default_collate.name, 'UTF8')
         if self.con.ods <= fdb.ODS_FB_20:
             self.assertListEqual([x.name for x in c.collations],
                                  ['UTF8', 'UCS_BASIC', 'UNICODE'])
@@ -2677,166 +2752,177 @@
                                  ['UTF8', 'UCS_BASIC', 'UNICODE', 'UNICODE_CI'])
         elif self.con.ods >= fdb.ODS_FB_25:
             self.assertListEqual([x.name for x in c.collations],
-                ['UTF8', 'UCS_BASIC', 'UNICODE', 'UNICODE_CI', 'UNICODE_CI_AI'])
+                                 ['UTF8', 'UCS_BASIC', 'UNICODE', 'UNICODE_CI', 'UNICODE_CI_AI'])
         #
-        self.assertEqual(c.get_sql_for('alter',collation='UCS_BASIC'),
-            "ALTER CHARACTER SET UTF8 SET DEFAULT COLLATION UCS_BASIC")
+        self.assertEqual(c.get_sql_for('alter', collation='UCS_BASIC'),
+                         "ALTER CHARACTER SET UTF8 SET DEFAULT COLLATION UCS_BASIC")
         with self.assertRaises(fdb.ProgrammingError) as cm:
-            c.get_sql_for('alter',badparam='UCS_BASIC')
+            c.get_sql_for('alter', badparam='UCS_BASIC')
         self.assertTupleEqual(cm.exception.args,
-            ("Unsupported parameter(s) 'badparam'",))
+                              ("Unsupported parameter(s) 'badparam'",))
         with self.assertRaises(fdb.ProgrammingError) as cm:
             c.get_sql_for('alter')
         self.assertTupleEqual(cm.exception.args,
-            ("Missing required parameter: 'collation'.",))
+                              ("Missing required parameter: 'collation'.",))
         #
-        self.assertEqual(c.get_collation('UCS_BASIC').name,'UCS_BASIC')
+        self.assertEqual(c.get_sql_for('comment'),
+                         'COMMENT ON CHARACTER SET UTF8 IS NULL')
+        #
+        self.assertEqual(c.get_collation('UCS_BASIC').name, 'UCS_BASIC')
         self.assertEqual(c.get_collation_by_id(c.get_collation('UCS_BASIC').id).name,
                          'UCS_BASIC')

     def testException(self):
         c = self.con.schema.get_exception('UNKNOWN_EMP_ID')
         # common properties
-        self.assertEqual(c.name,'UNKNOWN_EMP_ID')
+        self.assertEqual(c.name, 'UNKNOWN_EMP_ID')
         self.assertIsNone(c.description)
         self.assertListEqual(c.actions,
-            ['create', 'recreate', 'alter', 'create_or_alter', 'drop'])
+                             ['comment', 'create', 'recreate', 'alter', 'create_or_alter', 'drop'])
         self.assertFalse(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'UNKNOWN_EMP_ID')
+        self.assertEqual(c.get_quoted_name(), 'UNKNOWN_EMP_ID')
         d = c.get_dependents()
-        self.assertEqual(len(d),1)
+        self.assertEqual(len(d), 1)
         d = d[0]
-        self.assertEqual(d.dependent_name,'ADD_EMP_PROJ')
-        self.assertEqual(d.dependent_type,5)
-        self.assertIsInstance(d.dependent,sm.Procedure)
-        self.assertEqual(d.depended_on_name,'UNKNOWN_EMP_ID')
-        self.assertEqual(d.depended_on_type,7)
-        self.assertIsInstance(d.depended_on,sm.DatabaseException)
-        self.assertListEqual(c.get_dependencies(),[])
+        self.assertEqual(d.dependent_name, 'ADD_EMP_PROJ')
+        self.assertEqual(d.dependent_type, 5)
+        self.assertIsInstance(d.dependent, sm.Procedure)
+        self.assertEqual(d.depended_on_name, 'UNKNOWN_EMP_ID')
+        self.assertEqual(d.depended_on_type, 7)
+        self.assertIsInstance(d.depended_on, sm.DatabaseException)
+        self.assertListEqual(c.get_dependencies(), [])
         if self.con.ods < fdb.ODS_FB_30:
             self.assertIsNone(c.security_class)
             self.assertIsNone(c.owner_name)
         else:
-            self.assertEqual(c.security_class,'SQL$476')
-            self.assertEqual(c.owner_name,'SYSDBA')
+            self.assertEqual(c.security_class, 'SQL$476')
+            self.assertEqual(c.owner_name, 'SYSDBA')
         #
-        self.assertEqual(c.id,1)
-        self.assertEqual(c.message,"Invalid employee number or project id.")
+        self.assertEqual(c.id, 1)
+        self.assertEqual(c.message, "Invalid employee number or project id.")
         #
         self.assertEqual(c.get_sql_for('create'),
-            "CREATE EXCEPTION UNKNOWN_EMP_ID 'Invalid employee number or project id.'")
+                         "CREATE EXCEPTION UNKNOWN_EMP_ID 'Invalid employee number or project id.'")
         self.assertEqual(c.get_sql_for('recreate'),
-            "RECREATE EXCEPTION UNKNOWN_EMP_ID 'Invalid employee number or project id.'")
+                         "RECREATE EXCEPTION UNKNOWN_EMP_ID 'Invalid employee number or project id.'")
         self.assertEqual(c.get_sql_for('drop'),
-            "DROP EXCEPTION UNKNOWN_EMP_ID")
-        self.assertEqual(c.get_sql_for('alter',message="New message."),
+                         "DROP EXCEPTION UNKNOWN_EMP_ID")
+        self.assertEqual(c.get_sql_for('alter', message="New message."),
                          "ALTER EXCEPTION UNKNOWN_EMP_ID 'New message.'")
         with self.assertRaises(fdb.ProgrammingError) as cm:
-            c.get_sql_for('alter',badparam="New message.")
+            c.get_sql_for('alter', badparam="New message.")
         self.assertTupleEqual(cm.exception.args,
-            ("Unsupported parameter(s) 'badparam'",))
+                              ("Unsupported parameter(s) 'badparam'",))
         with self.assertRaises(fdb.ProgrammingError) as cm:
             c.get_sql_for('alter')
         self.assertTupleEqual(cm.exception.args,
-            ("Missing required parameter: 'message'.",))
+                              ("Missing required parameter: 'message'.",))
         self.assertEqual(c.get_sql_for('create_or_alter'),
-            "CREATE OR ALTER EXCEPTION UNKNOWN_EMP_ID 'Invalid employee number or project id.'")
+                         "CREATE OR ALTER EXCEPTION UNKNOWN_EMP_ID 'Invalid employee number or project id.'")
+        self.assertEqual(c.get_sql_for('comment'),
+                         "COMMENT ON EXCEPTION UNKNOWN_EMP_ID IS NULL")

     def testSequence(self):
         # System generator
         c = self.con.schema.get_sequence('RDB$FIELD_NAME')
         # common properties
-        self.assertEqual(c.name,'RDB$FIELD_NAME')
-        self.assertEqual(c.description,"Implicit domain name")
-        self.assertListEqual(c.actions,[])
+        self.assertEqual(c.name, 'RDB$FIELD_NAME')
+        self.assertEqual(c.description, "Implicit domain name")
+        self.assertListEqual(c.actions, ['comment'])
         self.assertTrue(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'RDB$FIELD_NAME')
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
+        self.assertEqual(c.get_quoted_name(), 'RDB$FIELD_NAME')
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
         #
-        self.assertEqual(c.id,6)
+        self.assertEqual(c.id, 6)
         # User generator
         c = self.con.schema.get_generator('EMP_NO_GEN')
         # common properties
-        self.assertEqual(c.name,'EMP_NO_GEN')
+        self.assertEqual(c.name, 'EMP_NO_GEN')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,['create', 'alter', 'drop'])
+        self.assertListEqual(c.actions, ['comment', 'create',
+                                         'alter', 'drop'])
         self.assertFalse(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'EMP_NO_GEN')
+        self.assertEqual(c.get_quoted_name(), 'EMP_NO_GEN')
         d = c.get_dependents()
-        self.assertEqual(len(d),1)
+        self.assertEqual(len(d), 1)
         d = d[0]
-        self.assertEqual(d.dependent_name,'SET_EMP_NO')
-        self.assertEqual(d.dependent_type,2)
-        self.assertIsInstance(d.dependent,sm.Trigger)
-        self.assertEqual(d.depended_on_name,'EMP_NO_GEN')
-        self.assertEqual(d.depended_on_type,14)
-        self.assertIsInstance(d.depended_on,sm.Sequence)
-        self.assertListEqual(c.get_dependencies(),[])
+        self.assertEqual(d.dependent_name, 'SET_EMP_NO')
+        self.assertEqual(d.dependent_type, 2)
+        self.assertIsInstance(d.dependent, sm.Trigger)
+        self.assertEqual(d.depended_on_name, 'EMP_NO_GEN')
+        self.assertEqual(d.depended_on_type, 14)
+        self.assertIsInstance(d.depended_on, sm.Sequence)
+        self.assertListEqual(c.get_dependencies(), [])
         #
         if self.con.ods < fdb.ODS_FB_30:
-            self.assertEqual(c.id,10)
+            self.assertEqual(c.id, 10)
             self.assertIsNone(c.security_class)
             self.assertIsNone(c.owner_name)
             self.assertIsNone(c.inital_value)
             self.assertIsNone(c.increment)
         else:
-            self.assertEqual(c.id,12)
-            self.assertEqual(c.security_class,'SQL$429')
-            self.assertEqual(c.owner_name,'SYSDBA')
-            self.assertEqual(c.inital_value,0)
-            self.assertEqual(c.increment,1)
-            self.assertEqual(c.value,145)
-        #
-        self.assertEqual(c.get_sql_for('create'),"CREATE SEQUENCE EMP_NO_GEN")
-        self.assertEqual(c.get_sql_for('drop'),"DROP SEQUENCE EMP_NO_GEN")
-        self.assertEqual(c.get_sql_for('alter',value=10),
+            self.assertEqual(c.id, 12)
+            self.assertEqual(c.security_class, 'SQL$429')
+            self.assertEqual(c.owner_name, 'SYSDBA')
+            self.assertEqual(c.inital_value, 0)
+            self.assertEqual(c.increment, 1)
+            self.assertEqual(c.value, 145)
+        #
+        self.assertEqual(c.get_sql_for('create'), "CREATE SEQUENCE EMP_NO_GEN")
+        self.assertEqual(c.get_sql_for('drop'), "DROP SEQUENCE EMP_NO_GEN")
+        self.assertEqual(c.get_sql_for('alter', value=10),
                          "ALTER SEQUENCE EMP_NO_GEN RESTART WITH 10")
         with self.assertRaises(fdb.ProgrammingError) as cm:
-            c.get_sql_for('alter',badparam=10)
+            c.get_sql_for('alter', badparam=10)
         self.assertTupleEqual(cm.exception.args,
-            ("Unsupported parameter(s) 'badparam'",))
+                              ("Unsupported parameter(s) 'badparam'",))
         with self.assertRaises(fdb.ProgrammingError) as cm:
             c.get_sql_for('alter')
         self.assertTupleEqual(cm.exception.args,
-            ("Missing required parameter: 'value'.",))
+                              ("Missing required parameter: 'value'.",))
+        self.assertEqual(c.get_sql_for('comment'),
+                         "COMMENT ON SEQUENCE EMP_NO_GEN IS NULL")
+        c.schema.opt_generator_keyword = 'GENERATOR'
+        self.assertEqual(c.get_sql_for('comment'),
+                         "COMMENT ON GENERATOR EMP_NO_GEN IS NULL")

     def testTableColumn(self):
         # System column
         c = self.con.schema.get_table('RDB$PAGES').get_column('RDB$PAGE_NUMBER')
         # common properties
-        self.assertEqual(c.name,'RDB$PAGE_NUMBER')
+        self.assertEqual(c.name, 'RDB$PAGE_NUMBER')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,[])
+        self.assertListEqual(c.actions, ['comment'])
         self.assertTrue(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'RDB$PAGE_NUMBER')
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
+        self.assertEqual(c.get_quoted_name(), 'RDB$PAGE_NUMBER')
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
         self.assertFalse(c.isidentity())
         self.assertIsNone(c.generator)
         # User column
         c = self.con.schema.get_table('DEPARTMENT').get_column('PHONE_NO')
         # common properties
-        self.assertEqual(c.name,'PHONE_NO')
+        self.assertEqual(c.name, 'PHONE_NO')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,['alter', 'drop'])
+        self.assertListEqual(c.actions, ['comment', 'alter', 'drop'])
         self.assertFalse(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'PHONE_NO')
+        self.assertEqual(c.get_quoted_name(), 'PHONE_NO')
         d = c.get_dependents()
-        self.assertEqual(len(d),1)
+        self.assertEqual(len(d), 1)
         d = d[0]
-        self.assertEqual(d.dependent_name,'PHONE_LIST')
-        self.assertEqual(d.dependent_type,1)
-        self.assertIsInstance(d.dependent,sm.View)
-        self.assertEqual(d.depended_on_name,'DEPARTMENT')
-        self.assertEqual(d.depended_on_type,0)
-        self.assertIsInstance(d.depended_on,sm.TableColumn)
-        self.assertListEqual(c.get_dependencies(),[])
-        #
-        self.assertEqual(c.table.name,'DEPARTMENT')
-        self.assertEqual(c.domain.name,'PHONENUMBER')
-        self.assertEqual(c.position,6)
+        self.assertEqual(d.dependent_name, 'PHONE_LIST')
+        self.assertEqual(d.dependent_type, 1)
+        self.assertIsInstance(d.dependent, sm.View)
+        self.assertEqual(d.depended_on_name, 'DEPARTMENT')
+        self.assertEqual(d.depended_on_type, 0)
+        self.assertIsInstance(d.depended_on, sm.TableColumn)
+        self.assertListEqual(c.get_dependencies(), [])
+        #
+        self.assertEqual(c.table.name, 'DEPARTMENT')
+        self.assertEqual(c.domain.name, 'PHONENUMBER')
+        self.assertEqual(c.position, 6)
         self.assertIsNone(c.security_class)
-        self.assertEqual(c.default,"'555-1234'")
+        self.assertEqual(c.default, "'555-1234'")
         self.assertIsNone(c.collation)
-        self.assertEqual(c.datatype,'VARCHAR(20)')
+        self.assertEqual(c.datatype, 'VARCHAR(20)')
         #
         self.assertTrue(c.isnullable())
         self.assertFalse(c.iscomputed())
@@ -2844,101 +2930,102 @@
         self.assertTrue(c.has_default())
         self.assertIsNone(c.get_computedby())
         #
+        self.assertEqual(c.get_sql_for('comment'),
+                         "COMMENT ON COLUMN DEPARTMENT.PHONE_NO IS NULL")
         self.assertEqual(c.get_sql_for('drop'),
                          "ALTER TABLE DEPARTMENT DROP PHONE_NO")
-        self.assertEqual(c.get_sql_for('alter',name='NewName'),
-            'ALTER TABLE DEPARTMENT ALTER COLUMN PHONE_NO TO "NewName"')
-        self.assertEqual(c.get_sql_for('alter',position=2),
-            "ALTER TABLE DEPARTMENT ALTER COLUMN PHONE_NO POSITION 2")
-        self.assertEqual(c.get_sql_for('alter',datatype='VARCHAR(25)'),
-            "ALTER TABLE DEPARTMENT ALTER COLUMN PHONE_NO TYPE VARCHAR(25)")
+        self.assertEqual(c.get_sql_for('alter', name='NewName'),
+                         'ALTER TABLE DEPARTMENT ALTER COLUMN PHONE_NO TO "NewName"')
+        self.assertEqual(c.get_sql_for('alter', position=2),
+                         "ALTER TABLE DEPARTMENT ALTER COLUMN PHONE_NO POSITION 2")
+        self.assertEqual(c.get_sql_for('alter', datatype='VARCHAR(25)'),
+                         "ALTER TABLE DEPARTMENT ALTER COLUMN PHONE_NO TYPE VARCHAR(25)")
         with self.assertRaises(fdb.ProgrammingError) as cm:
-            c.get_sql_for('alter',badparam=10)
-        self.assertTupleEqual(cm.exception.args,("Unsupported parameter(s) 'badparam'",))
+            c.get_sql_for('alter', badparam=10)
+        self.assertTupleEqual(cm.exception.args, ("Unsupported parameter(s) 'badparam'",))
         with self.assertRaises(fdb.ProgrammingError) as cm:
             c.get_sql_for('alter')
-        self.assertTupleEqual(cm.exception.args,("Parameter required.",))
+        self.assertTupleEqual(cm.exception.args, ("Parameter required.",))
         with self.assertRaises(fdb.ProgrammingError) as cm:
-            c.get_sql_for('alter',expression='(1+1)')
+            c.get_sql_for('alter', expression='(1+1)')
         self.assertTupleEqual(cm.exception.args,
-            ("Change from persistent column to computed is not allowed.",))
+                              ("Change from persistent column to computed is not allowed.",))
         # Computed column
         c = self.con.schema.get_table('EMPLOYEE').get_column('FULL_NAME')
         self.assertTrue(c.isnullable())
         self.assertTrue(c.iscomputed())
         self.assertFalse(c.isdomainbased())
         self.assertFalse(c.has_default())
-        self.assertEqual(c.get_computedby(),"(last_name || ', ' || first_name)")
+        self.assertEqual(c.get_computedby(), "(last_name || ', ' || first_name)")
         if self.con.ods < fdb.ODS_FB_30:
-            self.assertEqual(c.datatype,'VARCHAR(0)')
+            self.assertEqual(c.datatype, 'VARCHAR(0)')
         else:
-            self.assertEqual(c.datatype,'VARCHAR(37)')
+            self.assertEqual(c.datatype, 'VARCHAR(37)')
         #
-        self.assertEqual(c.get_sql_for('alter',datatype='VARCHAR(50)',
+        self.assertEqual(c.get_sql_for('alter', datatype='VARCHAR(50)',
                                        expression="(first_name || ', ' || last_name)"),
-            "ALTER TABLE EMPLOYEE ALTER COLUMN FULL_NAME TYPE VARCHAR(50) " \
-            "COMPUTED BY (first_name || ', ' || last_name)")
+                         "ALTER TABLE EMPLOYEE ALTER COLUMN FULL_NAME TYPE VARCHAR(50) " \
+                         "COMPUTED BY (first_name || ', ' || last_name)")
         with self.assertRaises(fdb.ProgrammingError) as cm:
-            c.get_sql_for('alter',datatype='VARCHAR(50)')
+            c.get_sql_for('alter', datatype='VARCHAR(50)')
         self.assertTupleEqual(cm.exception.args,
-            ("Change from computed column to persistent is not allowed.",))
+                              ("Change from computed column to persistent is not allowed.",))
         # Array column
         c = self.con.schema.get_table('AR').get_column('C2')
-        self.assertEqual(c.datatype,'INTEGER[4, 0:3, 2]')
+        self.assertEqual(c.datatype, 'INTEGER[4, 0:3, 2]')
         # Identity column
         if self.con.ods >= fdb.ODS_FB_30:
             c = self.con.schema.get_table('T5').get_column('ID')
             self.assertTrue(c.isidentity())
            self.assertTrue(c.generator.isidentity())
-            self.assertEqual(c.identity_type,1)
+            self.assertEqual(c.identity_type, 1)
             #
-            self.assertEqual(c.get_sql_for('alter',restart=None),
-                "ALTER TABLE T5 ALTER COLUMN ID RESTART")
-            self.assertEqual(c.get_sql_for('alter',restart=100),
-                "ALTER TABLE T5 ALTER COLUMN ID RESTART WITH 100")
+            self.assertEqual(c.get_sql_for('alter', restart=None),
+                             "ALTER TABLE T5 ALTER COLUMN ID RESTART")
+            self.assertEqual(c.get_sql_for('alter', restart=100),
+                             "ALTER TABLE T5 ALTER COLUMN ID RESTART WITH 100")

     def testIndex(self):
         # System index
         c = self.con.schema.get_index('RDB$INDEX_0')
         # common properties
-        self.assertEqual(c.name,'RDB$INDEX_0')
+        self.assertEqual(c.name, 'RDB$INDEX_0')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,['recompute'])
+        self.assertListEqual(c.actions, ['activate', 'recompute', 'comment'])
         self.assertTrue(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'RDB$INDEX_0')
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
+        self.assertEqual(c.get_quoted_name(), 'RDB$INDEX_0')
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
         #
-        self.assertEqual(c.table.name,'RDB$RELATIONS')
-        self.assertListEqual(c.segment_names,['RDB$RELATION_NAME'])
+        self.assertEqual(c.table.name, 'RDB$RELATIONS')
+        self.assertListEqual(c.segment_names, ['RDB$RELATION_NAME'])
         # user index
         c = self.con.schema.get_index('MAXSALX')
         # common properties
-        self.assertEqual(c.name,'MAXSALX')
+        self.assertEqual(c.name, 'MAXSALX')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,
-            ['create', 'activate', 'deactivate', 'recompute', 'drop'])
+        self.assertListEqual(c.actions, ['activate', 'recompute', 'comment', 'create', 'deactivate', 'drop'])
         self.assertFalse(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'MAXSALX')
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
-        #
-        self.assertEqual(c.id,3)
-        self.assertEqual(c.table.name,'JOB')
-        self.assertEqual(c.index_type,'DESCENDING')
+        self.assertEqual(c.get_quoted_name(), 'MAXSALX')
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
+        #
+        self.assertEqual(c.id, 3)
+        self.assertEqual(c.table.name, 'JOB')
+        self.assertEqual(c.index_type, 'DESCENDING')
         self.assertIsNone(c.partner_index)
         self.assertIsNone(c.expression)
         # startswith() is necessary, because Python 3 returns more precise value.
         self.assertTrue(str(c.statistics).startswith('0.0384615398943'))
-        self.assertListEqual(c.segment_names,['JOB_COUNTRY', 'MAX_SALARY'])
-        self.assertEqual(len(c.segments),2)
+        self.assertListEqual(c.segment_names, ['JOB_COUNTRY', 'MAX_SALARY'])
+        self.assertEqual(len(c.segments), 2)
         for segment in c.segments:
-            self.assertIsInstance(segment,sm.TableColumn)
-        self.assertEqual(c.segments[0].name,'JOB_COUNTRY')
-        self.assertEqual(c.segments[1].name,'MAX_SALARY')
+            self.assertIsInstance(segment, sm.TableColumn)
+        self.assertEqual(c.segments[0].name, 'JOB_COUNTRY')
+        self.assertEqual(c.segments[1].name, 'MAX_SALARY')
         if self.con.ods <= fdb.ODS_FB_20:
-            self.assertListEqual(c.segment_statistics,[None, None])
+            self.assertListEqual(c.segment_statistics, [None, None])
         elif self.con.ods > fdb.ODS_FB_20:
             self.assertListEqual(c.segment_statistics,
                                  [0.1428571492433548, 0.03846153989434242])
@@ -2949,98 +3036,104 @@
         self.assertFalse(c.isinactive())
         self.assertFalse(c.isenforcer())
         #
-        self.assertEqual(c.get_sql_for('create'),"""CREATE DESCENDING INDEX MAXSALX
-ON JOB (JOB_COUNTRY,MAX_SALARY)""")
-        self.assertEqual(c.get_sql_for('activate'),"ALTER INDEX MAXSALX ACTIVE")
-        self.assertEqual(c.get_sql_for('deactivate'),"ALTER INDEX MAXSALX INACTIVE")
-        self.assertEqual(c.get_sql_for('recompute'),"SET STATISTICS INDEX MAXSALX")
-        self.assertEqual(c.get_sql_for('drop'),"DROP INDEX MAXSALX")
+        self.assertEqual(c.get_sql_for('create'),
+                         """CREATE DESCENDING INDEX MAXSALX ON JOB (JOB_COUNTRY,MAX_SALARY)""")
+        self.assertEqual(c.get_sql_for('activate'), "ALTER INDEX MAXSALX ACTIVE")
+        self.assertEqual(c.get_sql_for('deactivate'), "ALTER INDEX MAXSALX INACTIVE")
+        self.assertEqual(c.get_sql_for('recompute'), "SET STATISTICS INDEX MAXSALX")
+        self.assertEqual(c.get_sql_for('drop'), "DROP INDEX MAXSALX")
+        self.assertEqual(c.get_sql_for('comment'),
+                         "COMMENT ON INDEX MAXSALX IS NULL")
         # Constraint index
         c = self.con.schema.get_index('RDB$FOREIGN6')
         # common properties
-        self.assertEqual(c.name,'RDB$FOREIGN6')
+        self.assertEqual(c.name, 'RDB$FOREIGN6')
         self.assertTrue(c.issystemobject())
         self.assertTrue(c.isenforcer())
-        self.assertEqual(c.partner_index.name,'RDB$PRIMARY5')
-        self.assertEqual(c.constraint.name,'INTEG_17')
+        self.assertEqual(c.partner_index.name, 'RDB$PRIMARY5')
+        self.assertEqual(c.constraint.name, 'INTEG_17')

     def testViewColumn(self):
         c = self.con.schema.get_view('PHONE_LIST').get_column('LAST_NAME')
         # common properties
-        self.assertEqual(c.name,'LAST_NAME')
+        self.assertEqual(c.name, 'LAST_NAME')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,[])
+        self.assertListEqual(c.actions, ['comment'])
         self.assertFalse(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'LAST_NAME')
-        self.assertListEqual(c.get_dependents(),[])
+        self.assertEqual(c.get_quoted_name(), 'LAST_NAME')
+        self.assertListEqual(c.get_dependents(), [])
         d = c.get_dependencies()
-        self.assertEqual(len(d),1)
+        self.assertEqual(len(d), 1)
         d = d[0]
-        self.assertEqual(d.dependent_name,'PHONE_LIST')
-        self.assertEqual(d.dependent_type,1)
-        self.assertIsInstance(d.dependent,sm.View)
-        self.assertEqual(d.field_name,'LAST_NAME')
-        self.assertEqual(d.depended_on_name,'EMPLOYEE')
-        self.assertEqual(d.depended_on_type,0)
-        self.assertIsInstance(d.depended_on,sm.TableColumn)
-        self.assertEqual(d.depended_on.name,'LAST_NAME')
-        self.assertEqual(d.depended_on.table.name,'EMPLOYEE')
-        #
-        self.assertEqual(c.view.name,'PHONE_LIST')
-        self.assertEqual(c.base_field.name,'LAST_NAME')
-        self.assertEqual(c.base_field.table.name,'EMPLOYEE')
-        self.assertEqual(c.domain.name,'LASTNAME')
-        self.assertEqual(c.position,2)
+        self.assertEqual(d.dependent_name, 'PHONE_LIST')
+        self.assertEqual(d.dependent_type, 1)
+        self.assertIsInstance(d.dependent, sm.View)
+        self.assertEqual(d.field_name, 'LAST_NAME')
+        self.assertEqual(d.depended_on_name, 'EMPLOYEE')
+        self.assertEqual(d.depended_on_type, 0)
+        self.assertIsInstance(d.depended_on, sm.TableColumn)
+        self.assertEqual(d.depended_on.name, 'LAST_NAME')
+        self.assertEqual(d.depended_on.table.name, 'EMPLOYEE')
+        #
+        self.assertEqual(c.view.name, 'PHONE_LIST')
+        self.assertEqual(c.base_field.name, 'LAST_NAME')
+        self.assertEqual(c.base_field.table.name, 'EMPLOYEE')
+        self.assertEqual(c.domain.name, 'LASTNAME')
+        self.assertEqual(c.position, 2)
         self.assertIsNone(c.security_class)
-        self.assertEqual(c.collation.name,'NONE')
-        self.assertEqual(c.datatype,'VARCHAR(20)')
+        self.assertEqual(c.collation.name, 'NONE')
+        self.assertEqual(c.datatype, 'VARCHAR(20)')
         #
         self.assertTrue(c.isnullable())
+        #
+        self.assertEqual(c.get_sql_for('comment'),
+                         "COMMENT ON COLUMN PHONE_LIST.LAST_NAME IS NULL")

     def testDomain(self):
         # System domain
         c = self.con.schema.get_domain('RDB$6')
         # common properties
-        self.assertEqual(c.name,'RDB$6')
+        self.assertEqual(c.name, 'RDB$6')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,[])
+        self.assertListEqual(c.actions, ['comment'])
         self.assertTrue(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'RDB$6')
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
+        self.assertEqual(c.get_quoted_name(), 'RDB$6')
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
         if self.con.ods < fdb.ODS_FB_30:
             self.assertIsNone(c.security_class)
             self.assertIsNone(c.owner_name)
         else:
-            self.assertEqual(c.security_class,'SQL$439')
-            self.assertEqual(c.owner_name,'SYSDBA')
+            self.assertEqual(c.security_class, 'SQL$439')
+            self.assertEqual(c.owner_name, 'SYSDBA')
         # User domain
         c = self.con.schema.get_domain('PRODTYPE')
         # common properties
-        self.assertEqual(c.name,'PRODTYPE')
+        self.assertEqual(c.name, 'PRODTYPE')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,['create', 'alter', 'drop'])
+        self.assertListEqual(c.actions, ['comment', 'create',
+                                         'alter', 'drop'])
         self.assertFalse(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'PRODTYPE')
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
+        self.assertEqual(c.get_quoted_name(), 'PRODTYPE')
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
         #
         self.assertIsNone(c.expression)
         self.assertEqual(c.validation,
-            "CHECK (VALUE IN ('software', 'hardware', 'other', 'N/A'))")
-        self.assertEqual(c.default,"'software'")
-        self.assertEqual(c.length,12)
-        self.assertEqual(c.scale,0)
-        self.assertEqual(c.field_type,37)
-        self.assertEqual(c.sub_type,0)
+                         "CHECK (VALUE IN ('software', 'hardware', 'other', 'N/A'))")
+        self.assertEqual(c.default, "'software'")
+        self.assertEqual(c.length, 12)
+        self.assertEqual(c.scale, 0)
+        self.assertEqual(c.field_type, 37)
+        self.assertEqual(c.sub_type, 0)
         self.assertIsNone(c.segment_length)
         self.assertIsNone(c.external_length)
         self.assertIsNone(c.external_scale)
         self.assertIsNone(c.external_type)
-        self.assertListEqual(c.dimensions,[])
-        self.assertEqual(c.character_length,12)
-        self.assertEqual(c.collation.name,'NONE')
-        self.assertEqual(c.character_set.name,'NONE')
+        self.assertListEqual(c.dimensions, [])
+        self.assertEqual(c.character_length, 12)
+        self.assertEqual(c.collation.name, 'NONE')
+        self.assertEqual(c.character_set.name, 'NONE')
         self.assertIsNone(c.precision)
-        self.assertEqual(c.datatype,'VARCHAR(12)')
+        self.assertEqual(c.datatype, 'VARCHAR(12)')
         #
         self.assertFalse(c.isnullable())
         self.assertFalse(c.iscomputed())
@@ -3049,83 +3142,85 @@
         self.assertTrue(c.has_default())
         #
         self.assertEqual(c.get_sql_for('create'),
-            "CREATE DOMAIN PRODTYPE AS VARCHAR(12) DEFAULT 'software' " \
-            "CHECK (VALUE IN ('software', 'hardware', 'other', 'N/A'))")
-        self.assertEqual(c.get_sql_for('drop'),"DROP DOMAIN PRODTYPE")
-        self.assertEqual(c.get_sql_for('alter',name='New_name'),
-            'ALTER DOMAIN PRODTYPE TO "New_name"')
-        self.assertEqual(c.get_sql_for('alter',default="'New_default'"),
-            "ALTER DOMAIN PRODTYPE SET DEFAULT 'New_default'")
-        self.assertEqual(c.get_sql_for('alter',check="VALUE STARTS WITH 'X'"),
-            "ALTER DOMAIN PRODTYPE ADD CHECK (VALUE STARTS WITH 'X')")
-        self.assertEqual(c.get_sql_for('alter',datatype='VARCHAR(30)'),
-            "ALTER DOMAIN PRODTYPE TYPE VARCHAR(30)")
+                         "CREATE DOMAIN PRODTYPE AS VARCHAR(12) DEFAULT 'software' " \
+                         "NOT NULL CHECK (VALUE IN ('software', 'hardware', 'other', 'N/A'))")
+        self.assertEqual(c.get_sql_for('drop'), "DROP DOMAIN PRODTYPE")
+        self.assertEqual(c.get_sql_for('alter', name='New_name'),
+                         'ALTER DOMAIN PRODTYPE TO "New_name"')
+        self.assertEqual(c.get_sql_for('alter', default="'New_default'"),
+                         "ALTER DOMAIN PRODTYPE SET DEFAULT 'New_default'")
+        self.assertEqual(c.get_sql_for('alter', check="VALUE STARTS WITH 'X'"),
+                         "ALTER DOMAIN PRODTYPE ADD CHECK (VALUE STARTS WITH 'X')")
+        self.assertEqual(c.get_sql_for('alter', datatype='VARCHAR(30)'),
+                         "ALTER DOMAIN PRODTYPE TYPE VARCHAR(30)")
         with self.assertRaises(fdb.ProgrammingError) as cm:
-            c.get_sql_for('alter',badparam=10)
+            c.get_sql_for('alter', badparam=10)
         self.assertTupleEqual(cm.exception.args,
-            ("Unsupported parameter(s) 'badparam'",))
+                              ("Unsupported parameter(s) 'badparam'",))
         with self.assertRaises(fdb.ProgrammingError) as cm:
             c.get_sql_for('alter')
-        self.assertTupleEqual(cm.exception.args,("Parameter required.",))
+        self.assertTupleEqual(cm.exception.args, ("Parameter required.",))
         # Domain with quoted name
         c = self.con.schema.get_domain('FIRSTNAME')
-        self.assertEqual(c.name,'FIRSTNAME')
-        self.assertEqual(c.get_quoted_name(),'"FIRSTNAME"')
+        self.assertEqual(c.name, 'FIRSTNAME')
+        self.assertEqual(c.get_quoted_name(), '"FIRSTNAME"')
         self.assertEqual(c.get_sql_for('create'),
-            'CREATE DOMAIN "FIRSTNAME" AS VARCHAR(15)')
+                         'CREATE DOMAIN "FIRSTNAME" AS VARCHAR(15)')
+        self.assertEqual(c.get_sql_for('comment'),
+                         'COMMENT ON DOMAIN "FIRSTNAME" IS NULL')

     def testDependency(self):
         l = self.con.schema.get_table('DEPARTMENT').get_dependents()
-        self.assertEqual(len(l),18)
+        self.assertEqual(len(l), 18)
         c = l[0] if self.con.ods < fdb.ODS_FB_30 else l[3]
         # common properties
         self.assertIsNone(c.name)
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,[])
+        self.assertListEqual(c.actions, [])
         self.assertTrue(c.issystemobject())
         self.assertIsNone(c.get_quoted_name())
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
         self.assertIsNone(c.package)
         self.assertFalse(c.ispackaged())
         #
-        self.assertEqual(c.dependent_name,'PHONE_LIST')
-        self.assertEqual(c.dependent_type,1)
-        self.assertIsInstance(c.dependent,sm.View)
-        self.assertEqual(c.dependent.name,'PHONE_LIST')
-        self.assertEqual(c.field_name,'DEPT_NO')
-        self.assertEqual(c.depended_on_name,'DEPARTMENT')
-        self.assertEqual(c.depended_on_type,0)
-        self.assertIsInstance(c.depended_on,sm.TableColumn)
-        self.assertEqual(c.depended_on.name,'DEPT_NO')
+        self.assertEqual(c.dependent_name, 'PHONE_LIST')
+        self.assertEqual(c.dependent_type, 1)
+        self.assertIsInstance(c.dependent, sm.View)
+        self.assertEqual(c.dependent.name, 'PHONE_LIST')
+        self.assertEqual(c.field_name, 'DEPT_NO')
+        self.assertEqual(c.depended_on_name, 'DEPARTMENT')
+        self.assertEqual(c.depended_on_type, 0)
+        self.assertIsInstance(c.depended_on, sm.TableColumn)
+        self.assertEqual(c.depended_on.name, 'DEPT_NO')
         #
         if self.con.engine_version >= 3.0:
-            self.assertListEqual(c.get_dependents(),[])
+            self.assertListEqual(c.get_dependents(), [])
             l = self.con.schema.get_package('TEST2').get_dependencies()
-            self.assertEqual(len(l),2)
+            self.assertEqual(len(l), 2)
             x = l[0]
-            self.assertEqual(x.depended_on.name,'FN')
+            self.assertEqual(x.depended_on.name, 'FN')
             self.assertFalse(x.depended_on.ispackaged())
             x = l[1]
-            self.assertEqual(x.depended_on.name,'F')
+            self.assertEqual(x.depended_on.name, 'F')
             self.assertTrue(x.depended_on.ispackaged())
-            self.assertIsInstance(x.package,sm.Package)
+            self.assertIsInstance(x.package, sm.Package)

     def testConstraint(self):
         # Common / PRIMARY KEY
         c = self.con.schema.get_table('CUSTOMER').primary_key
         # common properties
-        self.assertEqual(c.name,'INTEG_60')
+        self.assertEqual(c.name, 'INTEG_60')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,['create', 'drop'])
+        self.assertListEqual(c.actions, ['create', 'drop'])
         self.assertFalse(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'INTEG_60')
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
-        #
-        self.assertEqual(c.constraint_type,'PRIMARY KEY')
-        self.assertEqual(c.table.name,'CUSTOMER')
-        self.assertEqual(c.index.name,'RDB$PRIMARY22')
-        self.assertListEqual(c.trigger_names,[])
-        self.assertListEqual(c.triggers,[])
+        self.assertEqual(c.get_quoted_name(), 'INTEG_60')
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
+        #
+        self.assertEqual(c.constraint_type, 'PRIMARY KEY')
+        self.assertEqual(c.table.name, 'CUSTOMER')
+        self.assertEqual(c.index.name, 'RDB$PRIMARY22')
+        self.assertListEqual(c.trigger_names, [])
+        self.assertListEqual(c.triggers, [])
         self.assertIsNone(c.column_name)
         self.assertIsNone(c.partner_constraint)
         self.assertIsNone(c.match_option)
@@ -3141,23 +3236,23 @@
         self.assertFalse(c.isdeferred())
         #
         self.assertEqual(c.get_sql_for('create'),
-            "ALTER TABLE CUSTOMER ADD PRIMARY KEY (CUST_NO)")
+                         "ALTER TABLE CUSTOMER ADD PRIMARY KEY (CUST_NO)")
         self.assertEqual(c.get_sql_for('drop'),
-            "ALTER TABLE CUSTOMER DROP CONSTRAINT INTEG_60")
+                         "ALTER TABLE CUSTOMER DROP CONSTRAINT INTEG_60")
         # FOREIGN KEY
         c = self.con.schema.get_table('CUSTOMER').foreign_keys[0]
         #
-        self.assertListEqual(c.actions,['create', 'drop'])
-        self.assertEqual(c.constraint_type,'FOREIGN KEY')
-        self.assertEqual(c.table.name,'CUSTOMER')
-        self.assertEqual(c.index.name,'RDB$FOREIGN23')
-        self.assertListEqual(c.trigger_names,[])
-        self.assertListEqual(c.triggers,[])
+        self.assertListEqual(c.actions, ['create', 'drop'])
+        self.assertEqual(c.constraint_type, 'FOREIGN KEY')
+        self.assertEqual(c.table.name, 'CUSTOMER')
+        self.assertEqual(c.index.name, 'RDB$FOREIGN23')
+        self.assertListEqual(c.trigger_names, [])
+        self.assertListEqual(c.triggers, [])
         self.assertIsNone(c.column_name)
-        self.assertEqual(c.partner_constraint.name,'INTEG_2')
-        self.assertEqual(c.match_option,'FULL')
-        self.assertEqual(c.update_rule,'RESTRICT')
-        self.assertEqual(c.delete_rule,'RESTRICT')
+        self.assertEqual(c.partner_constraint.name, 'INTEG_2')
+        self.assertEqual(c.match_option, 'FULL')
+        self.assertEqual(c.update_rule, 'RESTRICT')
+        self.assertEqual(c.delete_rule, 'RESTRICT')
         #
         self.assertFalse(c.isnotnull())
         self.assertFalse(c.ispkey())
@@ -3171,13 +3266,13 @@
         # CHECK
         c = self.con.schema.get_constraint('INTEG_59')
         #
-        self.assertListEqual(c.actions,['create', 'drop'])
-        self.assertEqual(c.constraint_type,'CHECK')
-        self.assertEqual(c.table.name,'CUSTOMER')
+        self.assertListEqual(c.actions, ['create', 'drop'])
+        self.assertEqual(c.constraint_type, 'CHECK')
+        self.assertEqual(c.table.name, 'CUSTOMER')
         self.assertIsNone(c.index)
-        self.assertListEqual(c.trigger_names,['CHECK_9', 'CHECK_10'])
-        self.assertEqual(c.triggers[0].name,'CHECK_9')
-        self.assertEqual(c.triggers[1].name,'CHECK_10')
+        self.assertListEqual(c.trigger_names, ['CHECK_9', 'CHECK_10'])
+        self.assertEqual(c.triggers[0].name, 'CHECK_9')
+        self.assertEqual(c.triggers[1].name, 'CHECK_10')
         self.assertIsNone(c.column_name)
         self.assertIsNone(c.partner_constraint)
         self.assertIsNone(c.match_option)
@@ -3191,16 +3286,16 @@
         self.assertTrue(c.ischeck())
         #
         self.assertEqual(c.get_sql_for('create'),
-            "ALTER TABLE CUSTOMER ADD CHECK (on_hold IS NULL OR on_hold = '*')")
+                         "ALTER TABLE CUSTOMER ADD CHECK (on_hold IS NULL OR on_hold = '*')")
         # UNIQUE
         c = self.con.schema.get_constraint('INTEG_15')
         #
-        self.assertListEqual(c.actions,['create', 'drop'])
-        self.assertEqual(c.constraint_type,'UNIQUE')
-        self.assertEqual(c.table.name,'DEPARTMENT')
-        self.assertEqual(c.index.name,'RDB$4')
-        self.assertListEqual(c.trigger_names,[])
-        self.assertListEqual(c.triggers,[])
+        self.assertListEqual(c.actions, ['create', 'drop'])
+        self.assertEqual(c.constraint_type, 'UNIQUE')
+        self.assertEqual(c.table.name, 'DEPARTMENT')
+        self.assertEqual(c.index.name, 'RDB$4')
+        self.assertListEqual(c.trigger_names, [])
+        self.assertListEqual(c.triggers, [])
         self.assertIsNone(c.column_name)
         self.assertIsNone(c.partner_constraint)
         self.assertIsNone(c.match_option)
@@ -3214,17 +3309,17 @@
         self.assertFalse(c.ischeck())
         #
         self.assertEqual(c.get_sql_for('create'),
-            "ALTER TABLE DEPARTMENT ADD UNIQUE (DEPARTMENT)")
+                         "ALTER TABLE DEPARTMENT ADD UNIQUE (DEPARTMENT)")
         # NOT NULL
         c = self.con.schema.get_constraint('INTEG_13')
         #
-        self.assertListEqual(c.actions,[])
-        self.assertEqual(c.constraint_type,'NOT NULL')
-        self.assertEqual(c.table.name,'DEPARTMENT')
+        self.assertListEqual(c.actions, [])
+        self.assertEqual(c.constraint_type, 'NOT NULL')
+        self.assertEqual(c.table.name, 'DEPARTMENT')
         self.assertIsNone(c.index)
-        self.assertListEqual(c.trigger_names,[])
-        self.assertListEqual(c.triggers,[])
-        self.assertEqual(c.column_name,'DEPT_NO')
+        self.assertListEqual(c.trigger_names, [])
+        self.assertListEqual(c.triggers, [])
+        self.assertEqual(c.column_name, 'DEPT_NO')
         self.assertIsNone(c.partner_constraint)
         self.assertIsNone(c.match_option)
         self.assertIsNone(c.update_rule)
@@ -3239,94 +3334,93 @@
         # System table
         c = self.con.schema.get_table('RDB$PAGES')
         # common properties
-        self.assertEqual(c.name,'RDB$PAGES')
+        self.assertEqual(c.name, 'RDB$PAGES')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,[])
+        self.assertListEqual(c.actions, ['comment'])
         self.assertTrue(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'RDB$PAGES')
-        self.assertListEqual(c.get_dependents(),[])
-        self.assertListEqual(c.get_dependencies(),[])
+        self.assertEqual(c.get_quoted_name(), 'RDB$PAGES')
+        self.assertListEqual(c.get_dependents(), [])
+        self.assertListEqual(c.get_dependencies(), [])
         # User table
         c = self.con.schema.get_table('EMPLOYEE')
         # common properties
-        self.assertEqual(c.name,'EMPLOYEE')
+        self.assertEqual(c.name, 'EMPLOYEE')
         self.assertIsNone(c.description)
-        self.assertListEqual(c.actions,['create', 'recreate', 'drop'])
+        self.assertListEqual(c.actions, ['comment', 'create',
+                                         'recreate', 'drop'])
         self.assertFalse(c.issystemobject())
-        self.assertEqual(c.get_quoted_name(),'EMPLOYEE')
+        self.assertEqual(c.get_quoted_name(), 'EMPLOYEE')
         d = c.get_dependents()
         if self.con.ods <= fdb.ODS_FB_20:
-            self.assertListEqual([(x.dependent_name,x.dependent_type) for x in d],
-                [('RDB$9', 3), ('RDB$9', 3), ('PHONE_LIST', 1),
-                ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('PHONE_LIST', 1),
-                ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('CHECK_3', 2),
-                ('CHECK_3', 2), ('CHECK_3', 2), ('CHECK_3', 2),
-                ('CHECK_4', 2), ('CHECK_4', 2), ('CHECK_4', 2),
-                ('CHECK_4', 2), ('SET_EMP_NO', 2), ('SAVE_SALARY_CHANGE', 2),
-                ('SAVE_SALARY_CHANGE', 2), ('DELETE_EMPLOYEE', 5),
-                ('DELETE_EMPLOYEE', 5), ('ORG_CHART', 5), ('ORG_CHART', 5),
-                ('ORG_CHART', 5), ('ORG_CHART', 5), ('ORG_CHART', 5)])
+            self.assertListEqual([(x.dependent_name, x.dependent_type) for x in d],
+                                 [('RDB$9', 3), ('RDB$9', 3), ('PHONE_LIST', 1),
+                                 ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('PHONE_LIST', 1),
+                                 ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('CHECK_3', 2),
+                                 ('CHECK_3', 2), ('CHECK_3', 2), ('CHECK_3', 2),
+                                 ('CHECK_4', 2), ('CHECK_4', 2), ('CHECK_4', 2),
+                                 ('CHECK_4', 2), ('SET_EMP_NO', 2), ('SAVE_SALARY_CHANGE', 2),
+                                 ('SAVE_SALARY_CHANGE', 2), ('DELETE_EMPLOYEE', 5),
+                                 ('DELETE_EMPLOYEE', 5), ('ORG_CHART', 5), ('ORG_CHART', 5),
+                                 ('ORG_CHART', 5), ('ORG_CHART', 5), ('ORG_CHART', 5)])
         elif self.con.ods == fdb.ODS_FB_21:
-            self.assertListEqual([(x.dependent_name,x.dependent_type) for x in d],
-                [('PHONE_LIST', 1), ('PHONE_LIST', 1), ('PHONE_LIST', 1),
-                ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('CHECK_3', 2),
-                ('CHECK_3', 2), ('CHECK_3', 2), ('CHECK_3', 2),
-                ('CHECK_4', 2), ('CHECK_4', 2), ('CHECK_4', 2),
-                ('CHECK_4', 2), ('SET_EMP_NO', 2), ('SAVE_SALARY_CHANGE', 2),
-                ('SAVE_SALARY_CHANGE', 2), ('PHONE_LIST', 1),
-                ('DELETE_EMPLOYEE', 5), ('DELETE_EMPLOYEE', 5),
-                ('ORG_CHART', 5), ('ORG_CHART', 5), ('ORG_CHART', 5),
-                ('ORG_CHART', 5), ('ORG_CHART', 5)])
+            self.assertListEqual([(x.dependent_name, x.dependent_type) for x in d],
+                                 [('PHONE_LIST', 1), ('PHONE_LIST', 1), ('PHONE_LIST', 1),
+                                 ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('CHECK_3', 2),
+                                 ('CHECK_3', 2), ('CHECK_3', 2), ('CHECK_3', 2),
+                                 ('CHECK_4', 2), ('CHECK_4', 2), ('CHECK_4', 2),
+                                 ('CHECK_4', 2), ('SET_EMP_NO', 2), ('SAVE_SALARY_CHANGE', 2),
+                                 ('SAVE_SALARY_CHANGE', 2), ('PHONE_LIST', 1),
+                                 ('DELETE_EMPLOYEE', 5), ('DELETE_EMPLOYEE', 5),
+                                 ('ORG_CHART', 5), ('ORG_CHART', 5), ('ORG_CHART', 5),
+                                 ('ORG_CHART', 5), ('ORG_CHART', 5)])
         elif self.con.ods == fdb.ODS_FB_25:
-            self.assertListEqual([(x.dependent_name,x.dependent_type) for x in d],
-                [('RDB$9', 3), ('RDB$9', 3), ('PHONE_LIST', 1),
-                ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('CHECK_3', 2),
-                ('CHECK_3', 2), ('CHECK_3', 2), ('CHECK_3', 2),
-                ('CHECK_4', 2), ('CHECK_4', 2), ('CHECK_4', 2),
-                ('CHECK_4', 2), ('SET_EMP_NO', 2), ('SAVE_SALARY_CHANGE',
2), - ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('SAVE_SALARY_CHANGE', 2), - ('PHONE_LIST', 1), ('ORG_CHART', 5), ('ORG_CHART', 5), - ('ORG_CHART', 5), ('ORG_CHART', 5), ('ORG_CHART', 5), - ('DELETE_EMPLOYEE', 5), ('DELETE_EMPLOYEE', 5)]) + self.assertListEqual([(x.dependent_name, x.dependent_type) for x in d], + [('RDB$9', 3), ('RDB$9', 3), ('PHONE_LIST', 1), + ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('CHECK_3', 2), + ('CHECK_3', 2), ('CHECK_3', 2), ('CHECK_3', 2), ('CHECK_4', 2), + ('CHECK_4', 2), ('CHECK_4', 2), ('CHECK_4', 2), ('SET_EMP_NO', 2), + ('SAVE_SALARY_CHANGE', 2), ('PHONE_LIST', 1), ('PHONE_LIST', 1), + ('SAVE_SALARY_CHANGE', 2), ('PHONE_LIST', 1), ('ORG_CHART', 5), + ('ORG_CHART', 5), ('ORG_CHART', 5), ('ORG_CHART', 5), ('ORG_CHART', 5), + ('DELETE_EMPLOYEE', 5), ('DELETE_EMPLOYEE', 5)]) elif self.con.ods >= fdb.ODS_FB_30: - self.assertListEqual([(x.dependent_name,x.dependent_type) for x in d], - [('SAVE_SALARY_CHANGE', 2), ('SAVE_SALARY_CHANGE', 2), ('CHECK_3', 2), - ('CHECK_3', 2), ('CHECK_3', 2), ('CHECK_3', 2), ('CHECK_4', 2), - ('CHECK_4', 2), ('CHECK_4', 2), ('CHECK_4', 2), ('PHONE_LIST', 1), - ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('PHONE_LIST', 1), - ('PHONE_LIST', 1), ('DELETE_EMPLOYEE', 5), ('DELETE_EMPLOYEE', 5), - ('ORG_CHART', 5), ('ORG_CHART', 5), ('ORG_CHART', 5), ('ORG_CHART', 5), - ('ORG_CHART', 5), ('RDB$9', 3), ('RDB$9', 3), ('SET_EMP_NO', 2)] - ) - self.assertListEqual(c.get_dependencies(),[]) + self.assertListEqual([(x.dependent_name, x.dependent_type) for x in d], + [('SAVE_SALARY_CHANGE', 2), ('SAVE_SALARY_CHANGE', 2), ('CHECK_3', 2), + ('CHECK_3', 2), ('CHECK_3', 2), ('CHECK_3', 2), ('CHECK_4', 2), + ('CHECK_4', 2), ('CHECK_4', 2), ('CHECK_4', 2), ('PHONE_LIST', 1), + ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('PHONE_LIST', 1), ('PHONE_LIST', 1), + ('PHONE_LIST', 1), ('DELETE_EMPLOYEE', 5), ('DELETE_EMPLOYEE', 5), + ('ORG_CHART', 5), ('ORG_CHART', 5), ('ORG_CHART', 5), ('ORG_CHART', 5), + ('ORG_CHART', 5), ('RDB$9', 3), ('RDB$9', 3), ('SET_EMP_NO', 2)]) + self.assertListEqual(c.get_dependencies(), []) # - self.assertEqual(c.id,131) - self.assertEqual(c.dbkey_length,8) + self.assertEqual(c.id, 131) + self.assertEqual(c.dbkey_length, 8) if self.con.ods <= fdb.ODS_FB_20: - self.assertEqual(c.format,1) + self.assertEqual(c.format, 1) elif (self.con.ods > fdb.ODS_FB_20) and (self.con.ods < fdb.ODS_FB_30): - self.assertEqual(c.format,2) + self.assertEqual(c.format, 2) elif self.con.ods >= fdb.ODS_FB_30: - self.assertEqual(c.format,1) - self.assertEqual(c.table_type,'PERSISTENT') + self.assertEqual(c.format, 1) + self.assertEqual(c.table_type, 'PERSISTENT') if self.con.ods <= fdb.ODS_FB_21: - self.assertEqual(c.security_class,'SQL$EMPLOYEE') + self.assertEqual(c.security_class, 'SQL$EMPLOYEE') elif self.con.ods == fdb.ODS_FB_25: - self.assertEqual(c.security_class,'SQL$7') + self.assertEqual(c.security_class, 'SQL$7') elif self.con.ods == fdb.ODS_FB_30: - self.assertEqual(c.security_class,'SQL$440') + self.assertEqual(c.security_class, 'SQL$440') else: - self.assertEqual(c.security_class,'SQL$482') + self.assertEqual(c.security_class, 'SQL$482') self.assertIsNone(c.external_file) - self.assertEqual(c.owner_name,'SYSDBA') + self.assertEqual(c.owner_name, 'SYSDBA') if self.con.ods <= fdb.ODS_FB_20: - self.assertEqual(c.default_class,'SQL$DEFAULT5') + self.assertEqual(c.default_class, 'SQL$DEFAULT5') elif (self.con.ods > fdb.ODS_FB_20) and (self.con.ods < fdb.ODS_FB_30): - self.assertEqual(c.default_class,'SQL$DEFAULT7') + 
self.assertEqual(c.default_class, 'SQL$DEFAULT7') elif self.con.ods >= fdb.ODS_FB_30: - self.assertEqual(c.default_class,'SQL$DEFAULT54') - self.assertEqual(c.flags,1) - self.assertEqual(c.primary_key.name,'INTEG_27') + self.assertEqual(c.default_class, 'SQL$DEFAULT54') + self.assertEqual(c.flags, 1) + self.assertEqual(c.primary_key.name, 'INTEG_27') self.assertListEqual([x.name for x in c.foreign_keys], ['INTEG_28', 'INTEG_29']) self.assertListEqual([x.name for x in c.columns], @@ -3339,19 +3433,18 @@ 'INTEG_26', 'INTEG_27', 'INTEG_28', 'INTEG_29', 'INTEG_30']) self.assertListEqual([x.name for x in c.indices], - ['RDB$PRIMARY7', 'RDB$FOREIGN8', 'RDB$FOREIGN9', 'NAMEX']) + ['RDB$PRIMARY7', 'RDB$FOREIGN8', 'RDB$FOREIGN9', 'NAMEX']) self.assertListEqual([x.name for x in c.triggers], ['SET_EMP_NO', 'SAVE_SALARY_CHANGE']) # - self.assertEqual(c.get_column('EMP_NO').name,'EMP_NO') + self.assertEqual(c.get_column('EMP_NO').name, 'EMP_NO') self.assertFalse(c.isgtt()) self.assertTrue(c.ispersistent()) self.assertFalse(c.isexternal()) self.assertTrue(c.has_pkey()) self.assertTrue(c.has_fkey()) # - self.assertEqual(c.get_sql_for('create'),"""CREATE TABLE EMPLOYEE -( + self.assertEqual(c.get_sql_for('create'), """CREATE TABLE EMPLOYEE ( EMP_NO EMPNO NOT NULL, FIRST_NAME "FIRSTNAME" NOT NULL, LAST_NAME "LASTNAME" NOT NULL, @@ -3365,8 +3458,20 @@ FULL_NAME COMPUTED BY (last_name || ', ' || first_name), PRIMARY KEY (EMP_NO) )""") - self.assertEqual(c.get_sql_for('recreate'),"""RECREATE TABLE EMPLOYEE -( + self.assertEqual(c.get_sql_for('create', no_pk=True), """CREATE TABLE EMPLOYEE ( + EMP_NO EMPNO NOT NULL, + FIRST_NAME "FIRSTNAME" NOT NULL, + LAST_NAME "LASTNAME" NOT NULL, + PHONE_EXT VARCHAR(4), + HIRE_DATE TIMESTAMP DEFAULT 'NOW' NOT NULL, + DEPT_NO DEPTNO NOT NULL, + JOB_CODE JOBCODE NOT NULL, + JOB_GRADE JOBGRADE NOT NULL, + JOB_COUNTRY COUNTRYNAME NOT NULL, + SALARY SALARY NOT NULL, + FULL_NAME COMPUTED BY (last_name || ', ' || first_name) +)""") + self.assertEqual(c.get_sql_for('recreate'), """RECREATE TABLE EMPLOYEE ( EMP_NO EMPNO NOT NULL, FIRST_NAME "FIRSTNAME" NOT NULL, LAST_NAME "LASTNAME" NOT NULL, @@ -3380,150 +3485,155 @@ FULL_NAME COMPUTED BY (last_name || ', ' || first_name), PRIMARY KEY (EMP_NO) )""") - self.assertEqual(c.get_sql_for('drop'),"DROP TABLE EMPLOYEE") + self.assertEqual(c.get_sql_for('drop'), "DROP TABLE EMPLOYEE") + self.assertEqual(c.get_sql_for('comment'), + 'COMMENT ON TABLE EMPLOYEE IS NULL') # Identity colums if self.con.ods >= fdb.ODS_FB_30: c = self.con.schema.get_table('T5') - self.assertEqual(c.get_sql_for('create'), """CREATE TABLE T5 -( + self.assertEqual(c.get_sql_for('create'), """CREATE TABLE T5 ( ID NUMERIC(10, 0) GENERATED BY DEFAULT AS IDENTITY, C1 VARCHAR(15), UQ BIGINT GENERATED BY DEFAULT AS IDENTITY (START WITH 100), PRIMARY KEY (ID) )""") + def testView(self): # User view c = self.con.schema.get_view('PHONE_LIST') # common properties - self.assertEqual(c.name,'PHONE_LIST') + self.assertEqual(c.name, 'PHONE_LIST') self.assertIsNone(c.description) - self.assertListEqual(c.actions,['create', 'recreate', 'alter', - 'create_or_alter', 'drop']) + self.assertListEqual(c.actions, ['comment', 'create', + 'recreate', 'alter', + 'create_or_alter', 'drop']) self.assertFalse(c.issystemobject()) - self.assertEqual(c.get_quoted_name(),'PHONE_LIST') - self.assertListEqual(c.get_dependents(),[]) + self.assertEqual(c.get_quoted_name(), 'PHONE_LIST') + self.assertListEqual(c.get_dependents(), []) d = c.get_dependencies() if self.con.ods < fdb.ODS_FB_30: - 
self.assertListEqual([(x.depended_on_name,x.field_name,x.depended_on_type) for x in d], - [('DEPARTMENT', 'DEPT_NO', 0), ('EMPLOYEE', 'DEPT_NO', 0), - ('DEPARTMENT', None, 0), ('EMPLOYEE', None, 0), - ('DEPARTMENT', 'PHONE_NO', 0), ('EMPLOYEE', 'PHONE_EXT', 0), - ('EMPLOYEE', 'LAST_NAME', 0), ('EMPLOYEE', 'EMP_NO', 0), - ('DEPARTMENT', 'LOCATION', 0), ('EMPLOYEE', 'FIRST_NAME', 0)]) - else: - self.assertListEqual([(x.depended_on_name,x.field_name,x.depended_on_type) for x in d], - [('DEPARTMENT', 'DEPT_NO', 0), ('EMPLOYEE', 'DEPT_NO', 0), - ('DEPARTMENT', None, 0), ('EMPLOYEE', None, 0), ('EMPLOYEE', 'EMP_NO', 0), - ('EMPLOYEE', 'FIRST_NAME', 0), ('EMPLOYEE', 'LAST_NAME', 0), - ('EMPLOYEE', 'PHONE_EXT', 0), ('DEPARTMENT', 'LOCATION', 0), - ('DEPARTMENT', 'PHONE_NO', 0)] - ) + self.assertListEqual([(x.depended_on_name, x.field_name, x.depended_on_type) for x in d], + [('DEPARTMENT', 'DEPT_NO', 0), ('EMPLOYEE', 'DEPT_NO', 0), + ('DEPARTMENT', None, 0), ('EMPLOYEE', None, 0), + ('DEPARTMENT', 'PHONE_NO', 0), ('EMPLOYEE', 'PHONE_EXT', 0), + ('EMPLOYEE', 'LAST_NAME', 0), ('EMPLOYEE', 'EMP_NO', 0), + ('DEPARTMENT', 'LOCATION', 0), ('EMPLOYEE', 'FIRST_NAME', 0)]) + else: + self.assertListEqual([(x.depended_on_name, x.field_name, x.depended_on_type) for x in d], + [('DEPARTMENT', 'DEPT_NO', 0), ('EMPLOYEE', 'DEPT_NO', 0), + ('DEPARTMENT', None, 0), ('EMPLOYEE', None, 0), ('EMPLOYEE', 'EMP_NO', 0), + ('EMPLOYEE', 'FIRST_NAME', 0), ('EMPLOYEE', 'LAST_NAME', 0), + ('EMPLOYEE', 'PHONE_EXT', 0), ('DEPARTMENT', 'LOCATION', 0), + ('DEPARTMENT', 'PHONE_NO', 0)]) # if self.con.ods < fdb.ODS_FB_30: - self.assertEqual(c.id,143) + self.assertEqual(c.id, 143) else: - self.assertEqual(c.id,132) - self.assertEqual(c.sql,"""SELECT + self.assertEqual(c.id, 132) + self.assertEqual(c.sql, """SELECT emp_no, first_name, last_name, phone_ext, location, phone_no FROM employee, department WHERE employee.dept_no = department.dept_no""") - self.assertEqual(c.dbkey_length,16) - self.assertEqual(c.format,1) + self.assertEqual(c.dbkey_length, 16) + self.assertEqual(c.format, 1) if self.con.ods <= fdb.ODS_FB_21: - self.assertEqual(c.security_class,'SQL$PHONE_LIST') + self.assertEqual(c.security_class, 'SQL$PHONE_LIST') elif self.con.ods == fdb.ODS_FB_25: - self.assertEqual(c.security_class,'SQL$8') + self.assertEqual(c.security_class, 'SQL$8') elif self.con.ods == fdb.ODS_FB_30: - self.assertEqual(c.security_class,'SQL$444') + self.assertEqual(c.security_class, 'SQL$444') else: - self.assertEqual(c.security_class,'SQL$483') - self.assertEqual(c.owner_name,'SYSDBA') + self.assertEqual(c.security_class, 'SQL$483') + self.assertEqual(c.owner_name, 'SYSDBA') if self.con.ods <= fdb.ODS_FB_20: - self.assertEqual(c.default_class,'SQL$DEFAULT17') + self.assertEqual(c.default_class, 'SQL$DEFAULT17') elif (self.con.ods > fdb.ODS_FB_20) and (self.con.ods < fdb.ODS_FB_30): - self.assertEqual(c.default_class,'SQL$DEFAULT19') + self.assertEqual(c.default_class, 'SQL$DEFAULT19') elif self.con.ods >= fdb.ODS_FB_30: - self.assertEqual(c.default_class,'SQL$DEFAULT55') - self.assertEqual(c.flags,1) - self.assertListEqual([x.name for x in c.columns],['EMP_NO', 'FIRST_NAME', - 'LAST_NAME', 'PHONE_EXT', 'LOCATION', 'PHONE_NO']) - self.assertListEqual(c.triggers,[]) + self.assertEqual(c.default_class, 'SQL$DEFAULT55') + self.assertEqual(c.flags, 1) + self.assertListEqual([x.name for x in c.columns], ['EMP_NO', 'FIRST_NAME', + 'LAST_NAME', 'PHONE_EXT', + 'LOCATION', 'PHONE_NO']) + self.assertListEqual(c.triggers, []) # - 
self.assertEqual(c.get_column('LAST_NAME').name,'LAST_NAME') + self.assertEqual(c.get_column('LAST_NAME').name, 'LAST_NAME') self.assertFalse(c.has_checkoption()) # self.assertEqual(c.get_sql_for('create'), -"""CREATE VIEW PHONE_LIST (EMP_NO,FIRST_NAME,LAST_NAME,PHONE_EXT,LOCATION,PHONE_NO) + """CREATE VIEW PHONE_LIST (EMP_NO,FIRST_NAME,LAST_NAME,PHONE_EXT,LOCATION,PHONE_NO) AS SELECT emp_no, first_name, last_name, phone_ext, location, phone_no FROM employee, department WHERE employee.dept_no = department.dept_no""") self.assertEqual(c.get_sql_for('recreate'), -"""RECREATE VIEW PHONE_LIST (EMP_NO,FIRST_NAME,LAST_NAME,PHONE_EXT,LOCATION,PHONE_NO) + """RECREATE VIEW PHONE_LIST (EMP_NO,FIRST_NAME,LAST_NAME,PHONE_EXT,LOCATION,PHONE_NO) AS SELECT emp_no, first_name, last_name, phone_ext, location, phone_no FROM employee, department WHERE employee.dept_no = department.dept_no""") - self.assertEqual(c.get_sql_for('drop'),"DROP VIEW PHONE_LIST") - self.assertEqual(c.get_sql_for('alter',query='select * from country'), - "ALTER VIEW PHONE_LIST \n AS\n select * from country") - self.assertEqual(c.get_sql_for('alter',columns='country,currency', + self.assertEqual(c.get_sql_for('drop'), "DROP VIEW PHONE_LIST") + self.assertEqual(c.get_sql_for('alter', query='select * from country'), + "ALTER VIEW PHONE_LIST \n AS\n select * from country") + self.assertEqual(c.get_sql_for('alter', columns='country,currency', query='select * from country'), - "ALTER VIEW PHONE_LIST (country,currency)\n AS\n select * from country") - self.assertEqual(c.get_sql_for('alter',columns='country,currency', - query='select * from country',check=True), - "ALTER VIEW PHONE_LIST (country,currency)\n AS\n select * from country\n WITH CHECK OPTION") - self.assertEqual(c.get_sql_for('alter',columns=('country','currency'), - query='select * from country',check=True), - "ALTER VIEW PHONE_LIST (country,currency)\n AS\n select * from country\n WITH CHECK OPTION") + "ALTER VIEW PHONE_LIST (country,currency)\n AS\n select * from country") + self.assertEqual(c.get_sql_for('alter', columns='country,currency', + query='select * from country', check=True), + "ALTER VIEW PHONE_LIST (country,currency)\n AS\n select * from country\n WITH CHECK OPTION") + self.assertEqual(c.get_sql_for('alter', columns=('country', 'currency'), + query='select * from country', check=True), + "ALTER VIEW PHONE_LIST (country,currency)\n AS\n select * from country\n WITH CHECK OPTION") with self.assertRaises(fdb.ProgrammingError) as cm: - c.get_sql_for('alter',badparam='select * from country') + c.get_sql_for('alter', badparam='select * from country') self.assertTupleEqual(cm.exception.args, - ("Unsupported parameter(s) 'badparam'",)) + ("Unsupported parameter(s) 'badparam'",)) with self.assertRaises(fdb.ProgrammingError) as cm: c.get_sql_for('alter') - self.assertTupleEqual(cm.exception.args,("Missing required parameter: 'query'.",)) + self.assertTupleEqual(cm.exception.args, ("Missing required parameter: 'query'.",)) self.assertEqual(c.get_sql_for('create_or_alter'), -"""CREATE OR ALTER VIEW PHONE_LIST (EMP_NO,FIRST_NAME,LAST_NAME,PHONE_EXT,LOCATION,PHONE_NO) + """CREATE OR ALTER VIEW PHONE_LIST (EMP_NO,FIRST_NAME,LAST_NAME,PHONE_EXT,LOCATION,PHONE_NO) AS SELECT emp_no, first_name, last_name, phone_ext, location, phone_no FROM employee, department WHERE employee.dept_no = department.dept_no""") + self.assertEqual(c.get_sql_for('comment'), + 'COMMENT ON VIEW PHONE_LIST IS NULL') def testTrigger(self): # System trigger c = self.con.schema.get_trigger('RDB$TRIGGER_1') # 
common properties - self.assertEqual(c.name,'RDB$TRIGGER_1') + self.assertEqual(c.name, 'RDB$TRIGGER_1') self.assertIsNone(c.description) - self.assertListEqual(c.actions,[]) + self.assertListEqual(c.actions, ['comment']) self.assertTrue(c.issystemobject()) - self.assertEqual(c.get_quoted_name(),'RDB$TRIGGER_1') - self.assertListEqual(c.get_dependents(),[]) - self.assertListEqual(c.get_dependencies(),[]) + self.assertEqual(c.get_quoted_name(), 'RDB$TRIGGER_1') + self.assertListEqual(c.get_dependents(), []) + self.assertListEqual(c.get_dependencies(), []) # User trigger c = self.con.schema.get_trigger('SET_EMP_NO') # common properties - self.assertEqual(c.name,'SET_EMP_NO') + self.assertEqual(c.name, 'SET_EMP_NO') self.assertIsNone(c.description) self.assertListEqual(c.actions, - ['create', 'recreate', 'alter', 'create_or_alter', 'drop']) + ['comment', 'create', 'recreate', 'alter', 'create_or_alter', 'drop']) self.assertFalse(c.issystemobject()) - self.assertEqual(c.get_quoted_name(),'SET_EMP_NO') - self.assertListEqual(c.get_dependents(),[]) + self.assertEqual(c.get_quoted_name(), 'SET_EMP_NO') + self.assertListEqual(c.get_dependents(), []) d = c.get_dependencies() - self.assertListEqual([(x.depended_on_name,x.field_name,x.depended_on_type) for x in d], - [('EMPLOYEE', 'EMP_NO', 0), ('EMP_NO_GEN', None, 14)]) + self.assertListEqual([(x.depended_on_name, x.field_name, x.depended_on_type) for x in d], + [('EMPLOYEE', 'EMP_NO', 0), ('EMP_NO_GEN', None, 14)]) # - self.assertEqual(c.relation.name,'EMPLOYEE') - self.assertEqual(c.sequence,0) - self.assertEqual(c.trigger_type,1) + self.assertEqual(c.relation.name, 'EMPLOYEE') + self.assertEqual(c.sequence, 0) + self.assertEqual(c.trigger_type, 1) self.assertEqual(c.source, - "AS\nBEGIN\n if (new.emp_no is null) then\n new.emp_no = gen_id(emp_no_gen, 1);\nEND") - self.assertEqual(c.flags,1) + "AS\nBEGIN\n if (new.emp_no is null) then\n new.emp_no = gen_id(emp_no_gen, 1);\nEND") + self.assertEqual(c.flags, 1) # self.assertTrue(c.isactive()) self.assertTrue(c.isbefore()) @@ -3532,19 +3642,19 @@ self.assertTrue(c.isinsert()) self.assertFalse(c.isupdate()) self.assertFalse(c.isdelete()) - self.assertEqual(c.get_type_as_string(),'BEFORE INSERT') + self.assertEqual(c.get_type_as_string(), 'BEFORE INSERT') # if self.con.ods < fdb.ODS_FB_30: self.assertIsNone(c.valid_blr) self.assertIsNone(c.engine_name) self.assertIsNone(c.entrypoint) else: - self.assertEqual(c.valid_blr,1) + self.assertEqual(c.valid_blr, 1) self.assertIsNone(c.engine_name) self.assertIsNone(c.entrypoint) # self.assertEqual(c.get_sql_for('create'), -"""CREATE TRIGGER SET_EMP_NO FOR EMPLOYEE ACTIVE + """CREATE TRIGGER SET_EMP_NO FOR EMPLOYEE ACTIVE BEFORE INSERT POSITION 0 AS BEGIN @@ -3552,7 +3662,7 @@ new.emp_no = gen_id(emp_no_gen, 1); END""") self.assertEqual(c.get_sql_for('recreate'), -"""RECREATE TRIGGER SET_EMP_NO FOR EMPLOYEE ACTIVE + """RECREATE TRIGGER SET_EMP_NO FOR EMPLOYEE ACTIVE BEFORE INSERT POSITION 0 AS BEGIN @@ -3562,16 +3672,16 @@ with self.assertRaises(fdb.ProgrammingError) as cm: c.get_sql_for('alter') self.assertTupleEqual(cm.exception.args, - ("Header or body definition required.",)) + ("Header or body definition required.",)) with self.assertRaises(fdb.ProgrammingError) as cm: - c.get_sql_for('alter',declare="DECLARE VARIABLE i integer;") + c.get_sql_for('alter', declare="DECLARE VARIABLE i integer;") self.assertTupleEqual(cm.exception.args, - ("Header or body definition required.",)) - self.assertEqual(c.get_sql_for('alter',fire_on='AFTER INSERT', - 
active=False,sequence=0, - declare=' DECLARE VARIABLE i integer;\n DECLARE VARIABLE x integer;', - code=' i = 1;\n x = 2;'), -"""ALTER TRIGGER SET_EMP_NO INACTIVE + ("Header or body definition required.",)) + self.assertEqual(c.get_sql_for('alter', fire_on='AFTER INSERT', + active=False, sequence=0, + declare=' DECLARE VARIABLE i integer;\n DECLARE VARIABLE x integer;', + code=' i = 1;\n x = 2;'), + """ALTER TRIGGER SET_EMP_NO INACTIVE AFTER INSERT POSITION 0 AS @@ -3582,10 +3692,10 @@ x = 2; END""") self.assertEqual(c.get_sql_for('alter', - declare=['DECLARE VARIABLE i integer;', - 'DECLARE VARIABLE x integer;'], - code=['i = 1;','x = 2;']), -"""ALTER TRIGGER SET_EMP_NO + declare=['DECLARE VARIABLE i integer;', + 'DECLARE VARIABLE x integer;'], + code=['i = 1;', 'x = 2;']), + """ALTER TRIGGER SET_EMP_NO AS DECLARE VARIABLE i integer; DECLARE VARIABLE x integer; @@ -3593,11 +3703,11 @@ i = 1; x = 2; END""") - self.assertEqual(c.get_sql_for('alter',active=False), - "ALTER TRIGGER SET_EMP_NO INACTIVE") - self.assertEqual(c.get_sql_for('alter',sequence=10, - code=('i = 1;','x = 2;')), -"""ALTER TRIGGER SET_EMP_NO + self.assertEqual(c.get_sql_for('alter', active=False), + "ALTER TRIGGER SET_EMP_NO INACTIVE") + self.assertEqual(c.get_sql_for('alter', sequence=10, + code=('i = 1;', 'x = 2;')), + """ALTER TRIGGER SET_EMP_NO POSITION 10 AS BEGIN @@ -3605,18 +3715,20 @@ x = 2; END""") with self.assertRaises(fdb.ProgrammingError) as cm: - c.get_sql_for('alter',fire_on='ON CONNECT') + c.get_sql_for('alter', fire_on='ON CONNECT') self.assertTupleEqual(cm.exception.args, - ("Trigger type change is not allowed.",)) + ("Trigger type change is not allowed.",)) self.assertEqual(c.get_sql_for('create_or_alter'), -"""CREATE OR ALTER TRIGGER SET_EMP_NO FOR EMPLOYEE ACTIVE + """CREATE OR ALTER TRIGGER SET_EMP_NO FOR EMPLOYEE ACTIVE BEFORE INSERT POSITION 0 AS BEGIN if (new.emp_no is null) then new.emp_no = gen_id(emp_no_gen, 1); END""") - self.assertEqual(c.get_sql_for('drop'),"DROP TRIGGER SET_EMP_NO") + self.assertEqual(c.get_sql_for('drop'), "DROP TRIGGER SET_EMP_NO") + self.assertEqual(c.get_sql_for('comment'), + 'COMMENT ON TRIGGER SET_EMP_NO IS NULL') # Multi-trigger c = self.con.schema.get_trigger('TR_MULTI') # @@ -3632,67 +3744,71 @@ self.assertFalse(c.isinsert()) self.assertFalse(c.isupdate()) self.assertFalse(c.isdelete()) - self.assertEqual(c.get_type_as_string(),'ON CONNECT') + self.assertEqual(c.get_type_as_string(), 'ON CONNECT') def testProcedureParameter(self): # Input parameter c = self.con.schema.get_procedure('GET_EMP_PROJ').input_params[0] # common properties - self.assertEqual(c.name,'EMP_NO') + self.assertEqual(c.name, 'EMP_NO') self.assertIsNone(c.description) - self.assertListEqual(c.actions,[]) + self.assertListEqual(c.actions, ['comment']) self.assertFalse(c.issystemobject()) - self.assertEqual(c.get_quoted_name(),'EMP_NO') - self.assertListEqual(c.get_dependents(),[]) - self.assertListEqual(c.get_dependencies(),[]) - # - self.assertEqual(c.procedure.name,'GET_EMP_PROJ') - self.assertEqual(c.sequence,0) - self.assertEqual(c.domain.name,'RDB$32') - self.assertEqual(c.datatype,'SMALLINT') - self.assertEqual(c.type_from,sm.PROCPAR_DATATYPE) + self.assertEqual(c.get_quoted_name(), 'EMP_NO') + self.assertListEqual(c.get_dependents(), []) + self.assertListEqual(c.get_dependencies(), []) + # + self.assertEqual(c.procedure.name, 'GET_EMP_PROJ') + self.assertEqual(c.sequence, 0) + self.assertEqual(c.domain.name, 'RDB$32') + self.assertEqual(c.datatype, 'SMALLINT') + 
self.assertEqual(c.type_from, sm.PROCPAR_DATATYPE) self.assertIsNone(c.default) self.assertIsNone(c.collation) - if self.con.ods <= fdb.ODS_FB_20: - self.assertIsNone(c.mechanism) - elif self.con.ods > fdb.ODS_FB_20: - self.assertEqual(c.mechanism,0) + if self.con.ods <= fdb.ODS_FB_25: + self.assertEqual(c.mechanism, 0) + elif self.con.ods > fdb.ODS_FB_25: + self.assertEqual(c.mechanism, 0) self.assertIsNone(c.column) # self.assertTrue(c.isinput()) self.assertTrue(c.isnullable()) self.assertFalse(c.has_default()) - self.assertEqual(c.get_sql_definition(),'EMP_NO SMALLINT') + self.assertEqual(c.get_sql_definition(), 'EMP_NO SMALLINT') # Output parameter c = self.con.schema.get_procedure('GET_EMP_PROJ').output_params[0] # common properties - self.assertEqual(c.name,'PROJ_ID') + self.assertEqual(c.name, 'PROJ_ID') self.assertIsNone(c.description) - self.assertListEqual(c.actions,[]) + self.assertListEqual(c.actions, ['comment']) self.assertFalse(c.issystemobject()) - self.assertEqual(c.get_quoted_name(),'PROJ_ID') - self.assertListEqual(c.get_dependents(),[]) - self.assertListEqual(c.get_dependencies(),[]) + self.assertEqual(c.get_quoted_name(), 'PROJ_ID') + self.assertListEqual(c.get_dependents(), []) + self.assertListEqual(c.get_dependencies(), []) + # + self.assertEqual(c.get_sql_for('comment'), + 'COMMENT ON PARAMETER GET_EMP_PROJ.PROJ_ID IS NULL') # self.assertFalse(c.isinput()) - self.assertEqual(c.get_sql_definition(),'PROJ_ID CHAR(5)') + self.assertEqual(c.get_sql_definition(), 'PROJ_ID CHAR(5)') def testProcedure(self): c = self.con.schema.get_procedure('GET_EMP_PROJ') # common properties - self.assertEqual(c.name,'GET_EMP_PROJ') + self.assertEqual(c.name, 'GET_EMP_PROJ') self.assertIsNone(c.description) - self.assertListEqual(c.actions, - ['create', 'recreate', 'alter', 'create_or_alter', 'drop']) + self.assertListEqual(c.actions, ['comment', 'create', + 'recreate', 'alter', + 'create_or_alter', 'drop']) self.assertFalse(c.issystemobject()) - self.assertEqual(c.get_quoted_name(),'GET_EMP_PROJ') - self.assertListEqual(c.get_dependents(),[]) + self.assertEqual(c.get_quoted_name(), 'GET_EMP_PROJ') + self.assertListEqual(c.get_dependents(), []) d = c.get_dependencies() - self.assertListEqual([(x.depended_on_name,x.field_name,x.depended_on_type) for x in d], - [('EMPLOYEE_PROJECT', 'PROJ_ID', 0), ('EMPLOYEE_PROJECT', 'EMP_NO', 0), - ('EMPLOYEE_PROJECT', None, 0)]) + self.assertListEqual([(x.depended_on_name, x.field_name, x.depended_on_type) for x in d], + [('EMPLOYEE_PROJECT', 'PROJ_ID', 0), ('EMPLOYEE_PROJECT', 'EMP_NO', 0), + ('EMPLOYEE_PROJECT', None, 0)]) # - self.assertEqual(c.id,1) - self.assertEqual(c.source,"""BEGIN + self.assertEqual(c.id, 1) + self.assertEqual(c.source, """BEGIN FOR SELECT proj_id FROM employee_project WHERE emp_no = :emp_no @@ -3701,30 +3817,30 @@ SUSPEND; END""") if self.con.ods < fdb.ODS_FB_25: - self.assertEqual(c.security_class,'SQL$GET_EMP_PROJ') + self.assertEqual(c.security_class, 'SQL$GET_EMP_PROJ') elif self.con.ods == fdb.ODS_FB_25: - self.assertEqual(c.security_class,'SQL$20') + self.assertEqual(c.security_class, 'SQL$20') elif self.con.ods >= fdb.ODS_FB_30: - self.assertEqual(c.security_class,'SQL$473') - self.assertEqual(c.owner_name,'SYSDBA') - self.assertListEqual([x.name for x in c.input_params],['EMP_NO']) - self.assertListEqual([x.name for x in c.output_params],['PROJ_ID']) + self.assertEqual(c.security_class, 'SQL$473') + self.assertEqual(c.owner_name, 'SYSDBA') + self.assertListEqual([x.name for x in c.input_params], ['EMP_NO']) + 
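[Editor's note — illustration, not part of the diff] The new assertions above exercise the 'comment' action that fdb 2.0 adds to schema objects and procedure parameters. A minimal sketch of the same behaviour through the public API, assuming a local Firebird server with the standard employee example database (the DSN and credentials below are illustrative):

    import fdb

    con = fdb.connect(dsn='localhost:employee', user='SYSDBA', password='masterkey')
    proc = con.schema.get_procedure('GET_EMP_PROJ')
    # 'comment' now leads the action list tested above:
    print(proc.actions)
    # ['comment', 'create', 'recreate', 'alter', 'create_or_alter', 'drop']
    # With no description stored, the generated DDL clears the comment:
    print(proc.get_sql_for('comment'))
    # COMMENT ON PROCEDURE GET_EMP_PROJ IS NULL
    print(proc.output_params[0].get_sql_for('comment'))
    # COMMENT ON PARAMETER GET_EMP_PROJ.PROJ_ID IS NULL
    con.close()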
self.assertListEqual([x.name for x in c.output_params], ['PROJ_ID']) if self.con.engine_version >= 3.0: self.assertTrue(c.valid_blr) - self.assertEqual(c.proc_type,1) + self.assertEqual(c.proc_type, 1) self.assertIsNone(c.engine_name) self.assertIsNone(c.entrypoint) self.assertIsNone(c.package) self.assertIsNone(c.privacy) else: self.assertIsNone(c.valid_blr) - self.assertEqual(c.proc_type,0) + self.assertEqual(c.proc_type, 0) # - self.assertEqual(c.get_param('EMP_NO').name,'EMP_NO') - self.assertEqual(c.get_param('PROJ_ID').name,'PROJ_ID') + self.assertEqual(c.get_param('EMP_NO').name, 'EMP_NO') + self.assertEqual(c.get_param('PROJ_ID').name, 'PROJ_ID') # self.assertEqual(c.get_sql_for('create'), -"""CREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) + """CREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) RETURNS (PROJ_ID CHAR(5)) AS BEGIN @@ -3735,14 +3851,23 @@ DO SUSPEND; END""") - self.assertEqual(c.get_sql_for('create',no_code=True), -"""CREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) + if self.version == FB30: + self.assertEqual(c.get_sql_for('create', no_code=True), + """CREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) +RETURNS (PROJ_ID CHAR(5)) +AS +BEGIN + SUSPEND; +END""") + else: + self.assertEqual(c.get_sql_for('create', no_code=True), + """CREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) RETURNS (PROJ_ID CHAR(5)) AS BEGIN END""") self.assertEqual(c.get_sql_for('recreate'), -"""RECREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) + """RECREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) RETURNS (PROJ_ID CHAR(5)) AS BEGIN @@ -3753,14 +3878,24 @@ DO SUSPEND; END""") - self.assertEqual(c.get_sql_for('recreate',no_code=True), -"""RECREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) + if self.version == FB30: + self.assertEqual(c.get_sql_for('recreate', no_code=True), + """RECREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) +RETURNS (PROJ_ID CHAR(5)) +AS +BEGIN + SUSPEND; +END""") + else: + self.assertEqual(c.get_sql_for('recreate', no_code=True), + """RECREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) RETURNS (PROJ_ID CHAR(5)) AS BEGIN END""") + self.assertEqual(c.get_sql_for('create_or_alter'), -"""CREATE OR ALTER PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) + """CREATE OR ALTER PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) RETURNS (PROJ_ID CHAR(5)) AS BEGIN @@ -3771,50 +3906,59 @@ DO SUSPEND; END""") - self.assertEqual(c.get_sql_for('create_or_alter',no_code=True), -"""CREATE OR ALTER PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) + if self.version == FB30: + self.assertEqual(c.get_sql_for('create_or_alter', no_code=True), + """CREATE OR ALTER PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) +RETURNS (PROJ_ID CHAR(5)) +AS +BEGIN + SUSPEND; +END""") + else: + self.assertEqual(c.get_sql_for('create_or_alter', no_code=True), + """CREATE OR ALTER PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT) RETURNS (PROJ_ID CHAR(5)) AS BEGIN END""") - self.assertEqual(c.get_sql_for('drop'),"DROP PROCEDURE GET_EMP_PROJ") - self.assertEqual(c.get_sql_for('alter',code=" /* PASS */"), -"""ALTER PROCEDURE GET_EMP_PROJ + self.assertEqual(c.get_sql_for('drop'), "DROP PROCEDURE GET_EMP_PROJ") + self.assertEqual(c.get_sql_for('alter', code=" /* PASS */"), + """ALTER PROCEDURE GET_EMP_PROJ AS BEGIN /* PASS */ END""") with self.assertRaises(fdb.ProgrammingError) as cm: - c.get_sql_for('alter',declare="DECLARE VARIABLE i integer;") - self.assertTupleEqual(cm.exception.args, - ("Missing required parameter: 'code'.",)) - self.assertEqual(c.get_sql_for('alter',code=''), -"""ALTER PROCEDURE GET_EMP_PROJ + c.get_sql_for('alter', declare="DECLARE VARIABLE i 
integer;") + self.assertTupleEqual(cm.exception.args, + ("Missing required parameter: 'code'.",)) + self.assertEqual(c.get_sql_for('alter', code=''), + """ALTER PROCEDURE GET_EMP_PROJ AS BEGIN END""") - self.assertEqual(c.get_sql_for('alter',input="IN1 integer",code=''), -"""ALTER PROCEDURE GET_EMP_PROJ (IN1 integer) + self.assertEqual(c.get_sql_for('alter', input="IN1 integer", code=''), + """ALTER PROCEDURE GET_EMP_PROJ (IN1 integer) AS BEGIN END""") - self.assertEqual(c.get_sql_for('alter',output="OUT1 integer",code=''), -"""ALTER PROCEDURE GET_EMP_PROJ + self.assertEqual(c.get_sql_for('alter', output="OUT1 integer", code=''), + """ALTER PROCEDURE GET_EMP_PROJ RETURNS (OUT1 integer) AS BEGIN END""") - self.assertEqual(c.get_sql_for('alter',input="IN1 integer", - output="OUT1 integer",code=''), -"""ALTER PROCEDURE GET_EMP_PROJ (IN1 integer) + self.assertEqual(c.get_sql_for('alter', input="IN1 integer", + output="OUT1 integer", code=''), + """ALTER PROCEDURE GET_EMP_PROJ (IN1 integer) RETURNS (OUT1 integer) AS BEGIN END""") self.assertEqual(c.get_sql_for('alter', - input=["IN1 integer","IN2 VARCHAR(10)"], + input=["IN1 integer", "IN2 VARCHAR(10)"], code=''), -"""ALTER PROCEDURE GET_EMP_PROJ ( + """ALTER PROCEDURE GET_EMP_PROJ ( IN1 integer, IN2 VARCHAR(10) ) @@ -3822,9 +3966,9 @@ BEGIN END""") self.assertEqual(c.get_sql_for('alter', - output=["OUT1 integer","OUT2 VARCHAR(10)"], + output=["OUT1 integer", "OUT2 VARCHAR(10)"], code=''), -"""ALTER PROCEDURE GET_EMP_PROJ + """ALTER PROCEDURE GET_EMP_PROJ RETURNS ( OUT1 integer, OUT2 VARCHAR(10) @@ -3833,10 +3977,10 @@ BEGIN END""") self.assertEqual(c.get_sql_for('alter', - input=["IN1 integer","IN2 VARCHAR(10)"], - output=["OUT1 integer","OUT2 VARCHAR(10)"], + input=["IN1 integer", "IN2 VARCHAR(10)"], + output=["OUT1 integer", "OUT2 VARCHAR(10)"], code=''), -"""ALTER PROCEDURE GET_EMP_PROJ ( + """ALTER PROCEDURE GET_EMP_PROJ ( IN1 integer, IN2 VARCHAR(10) ) @@ -3847,62 +3991,66 @@ AS BEGIN END""") - self.assertEqual(c.get_sql_for('alter',code=" -- line 1;\n -- line 2;"), -"""ALTER PROCEDURE GET_EMP_PROJ + self.assertEqual(c.get_sql_for('alter', code=" -- line 1;\n -- line 2;"), + """ALTER PROCEDURE GET_EMP_PROJ AS BEGIN -- line 1; -- line 2; END""") - self.assertEqual(c.get_sql_for('alter',code=["-- line 1;","-- line 2;"]), -"""ALTER PROCEDURE GET_EMP_PROJ + self.assertEqual(c.get_sql_for('alter', code=["-- line 1;", "-- line 2;"]), + """ALTER PROCEDURE GET_EMP_PROJ AS BEGIN -- line 1; -- line 2; END""") - self.assertEqual(c.get_sql_for('alter',code=" /* PASS */", + self.assertEqual(c.get_sql_for('alter', code=" /* PASS */", declare=" -- line 1;\n -- line 2;"), -"""ALTER PROCEDURE GET_EMP_PROJ + """ALTER PROCEDURE GET_EMP_PROJ AS -- line 1; -- line 2; BEGIN /* PASS */ END""") - self.assertEqual(c.get_sql_for('alter',code=" /* PASS */", - declare=["-- line 1;","-- line 2;"]), -"""ALTER PROCEDURE GET_EMP_PROJ + self.assertEqual(c.get_sql_for('alter', code=" /* PASS */", + declare=["-- line 1;", "-- line 2;"]), + """ALTER PROCEDURE GET_EMP_PROJ AS -- line 1; -- line 2; BEGIN /* PASS */ END""") + self.assertEqual(c.get_sql_for('comment'), + 'COMMENT ON PROCEDURE GET_EMP_PROJ IS NULL') def testRole(self): c = self.con.schema.get_role('TEST_ROLE') # common properties - self.assertEqual(c.name,'TEST_ROLE') + self.assertEqual(c.name, 'TEST_ROLE') self.assertIsNone(c.description) - self.assertListEqual(c.actions,['create', 'drop']) + self.assertListEqual(c.actions, ['comment', 'create', 'drop']) self.assertFalse(c.issystemobject()) - 
self.assertEqual(c.get_quoted_name(),'TEST_ROLE') - self.assertListEqual(c.get_dependents(),[]) - self.assertListEqual(c.get_dependencies(),[]) - # - self.assertEqual(c.owner_name,'SYSDBA') - # - self.assertEqual(c.get_sql_for('create'),"CREATE ROLE TEST_ROLE") - self.assertEqual(c.get_sql_for('drop'),"DROP ROLE TEST_ROLE") - def _mockFunction(self,name): + self.assertEqual(c.get_quoted_name(), 'TEST_ROLE') + self.assertListEqual(c.get_dependents(), []) + self.assertListEqual(c.get_dependencies(), []) + # + self.assertEqual(c.owner_name, 'SYSDBA') + # + self.assertEqual(c.get_sql_for('create'), "CREATE ROLE TEST_ROLE") + self.assertEqual(c.get_sql_for('drop'), "DROP ROLE TEST_ROLE") + self.assertEqual(c.get_sql_for('comment'), + 'COMMENT ON ROLE TEST_ROLE IS NULL') + def _mockFunction(self, name): f = None if name == 'STRLEN': f = sm.Function(self.con.schema, - {'RDB$ENTRYPOINT': 'IB_UDF_strlen ', - 'RDB$SYSTEM_FLAG': 0, 'RDB$RETURN_ARGUMENT': 0, - 'RDB$MODULE_NAME': 'ib_udf', 'RDB$FUNCTION_TYPE': None, - 'RDB$DESCRIPTION': None, - 'RDB$FUNCTION_NAME': 'STRLEN '}) + {'RDB$ENTRYPOINT': 'IB_UDF_strlen ', + 'RDB$SYSTEM_FLAG': 0, 'RDB$RETURN_ARGUMENT': 0, + 'RDB$MODULE_NAME': 'ib_udf', 'RDB$FUNCTION_TYPE': None, + 'RDB$DESCRIPTION': None, + 'RDB$FUNCTION_NAME': 'STRLEN '}) f._load_arguments( [{'RDB$FIELD_PRECISION': 0, 'RDB$FIELD_LENGTH': 4, 'RDB$FIELD_SCALE': 0, 'RDB$FIELD_SUB_TYPE': 0, @@ -3918,11 +4066,11 @@ 'RDB$ARGUMENT_POSITION': 1}]) elif name == 'STRING2BLOB': f = sm.Function(self.con.schema, - {'RDB$ENTRYPOINT': 'string2blob ', - 'RDB$SYSTEM_FLAG': 0, 'RDB$RETURN_ARGUMENT': 2, - 'RDB$MODULE_NAME': 'fbudf', 'RDB$FUNCTION_TYPE': None, - 'RDB$DESCRIPTION': None, - 'RDB$FUNCTION_NAME': 'STRING2BLOB '}) + {'RDB$ENTRYPOINT': 'string2blob ', + 'RDB$SYSTEM_FLAG': 0, 'RDB$RETURN_ARGUMENT': 2, + 'RDB$MODULE_NAME': 'fbudf', 'RDB$FUNCTION_TYPE': None, + 'RDB$DESCRIPTION': None, + 'RDB$FUNCTION_NAME': 'STRING2BLOB '}) f._load_arguments( [{'RDB$FIELD_PRECISION': None, 'RDB$FIELD_LENGTH': 300, 'RDB$FIELD_SCALE': 0, 'RDB$FIELD_SUB_TYPE': 0, @@ -3938,11 +4086,11 @@ 'RDB$ARGUMENT_POSITION': 2}]) elif name == 'LTRIM': f = sm.Function(self.con.schema, - {'RDB$ENTRYPOINT': 'IB_UDF_ltrim ', - 'RDB$SYSTEM_FLAG': 0, 'RDB$RETURN_ARGUMENT': 0, - 'RDB$MODULE_NAME': 'ib_udf', 'RDB$FUNCTION_TYPE': None, - 'RDB$DESCRIPTION': None, - 'RDB$FUNCTION_NAME': 'LTRIM '}) + {'RDB$ENTRYPOINT': 'IB_UDF_ltrim ', + 'RDB$SYSTEM_FLAG': 0, 'RDB$RETURN_ARGUMENT': 0, + 'RDB$MODULE_NAME': 'ib_udf', 'RDB$FUNCTION_TYPE': None, + 'RDB$DESCRIPTION': None, + 'RDB$FUNCTION_NAME': 'LTRIM '}) f._load_arguments( [{'RDB$FIELD_PRECISION': None, 'RDB$FIELD_LENGTH': 255, 'RDB$FIELD_SCALE': 0, 'RDB$FIELD_SUB_TYPE': 0, @@ -3958,11 +4106,11 @@ 'RDB$ARGUMENT_POSITION': 1}]) elif name == 'I64NVL': f = sm.Function(self.con.schema, - {'RDB$ENTRYPOINT': 'idNvl ', - 'RDB$SYSTEM_FLAG': 0, 'RDB$RETURN_ARGUMENT': 0, - 'RDB$MODULE_NAME': 'fbudf', 'RDB$FUNCTION_TYPE': None, - 'RDB$DESCRIPTION': None, - 'RDB$FUNCTION_NAME': 'I64NVL '}) + {'RDB$ENTRYPOINT': 'idNvl ', + 'RDB$SYSTEM_FLAG': 0, 'RDB$RETURN_ARGUMENT': 0, + 'RDB$MODULE_NAME': 'fbudf', 'RDB$FUNCTION_TYPE': None, + 'RDB$DESCRIPTION': None, + 'RDB$FUNCTION_NAME': 'I64NVL '}) f._load_arguments( [{'RDB$FIELD_PRECISION': 18, 'RDB$FIELD_LENGTH': 8, 'RDB$FIELD_SCALE': 0, 'RDB$FIELD_SUB_TYPE': 1, @@ -3989,27 +4137,27 @@ def testFunctionArgument(self): f = self._mockFunction('STRLEN') c = f.arguments[0] - self.assertEqual(len(f.arguments),1) + self.assertEqual(len(f.arguments), 1) # common properties - 
self.assertEqual(c.name,'STRLEN_1') + self.assertEqual(c.name, 'STRLEN_1') self.assertIsNone(c.description) - self.assertListEqual(c.actions,[]) + self.assertListEqual(c.actions, []) self.assertFalse(c.issystemobject()) - self.assertEqual(c.get_quoted_name(),'STRLEN_1') - self.assertListEqual(c.get_dependents(),[]) - self.assertListEqual(c.get_dependencies(),[]) - # - self.assertEqual(c.function.name,'STRLEN') - self.assertEqual(c.position,1) - self.assertEqual(c.mechanism,1) - self.assertEqual(c.field_type,40) - self.assertEqual(c.length,32767) - self.assertEqual(c.scale,0) + self.assertEqual(c.get_quoted_name(), 'STRLEN_1') + self.assertListEqual(c.get_dependents(), []) + self.assertListEqual(c.get_dependencies(), []) + # + self.assertEqual(c.function.name, 'STRLEN') + self.assertEqual(c.position, 1) + self.assertEqual(c.mechanism, 1) + self.assertEqual(c.field_type, 40) + self.assertEqual(c.length, 32767) + self.assertEqual(c.scale, 0) self.assertIsNone(c.precision) - self.assertEqual(c.sub_type,0) - self.assertEqual(c.character_length,32767) - self.assertEqual(c.character_set.name,'NONE') - self.assertEqual(c.datatype,'CSTRING(32767)') + self.assertEqual(c.sub_type, 0) + self.assertEqual(c.character_length, 32767) + self.assertEqual(c.character_set.name, 'NONE') + self.assertEqual(c.datatype, 'CSTRING(32767)') # self.assertFalse(c.isbyvalue()) self.assertTrue(c.isbyreference()) @@ -4017,20 +4165,20 @@ self.assertFalse(c.iswithnull()) self.assertFalse(c.isfreeit()) self.assertFalse(c.isreturning()) - self.assertEqual(c.get_sql_definition(),'CSTRING(32767)') + self.assertEqual(c.get_sql_definition(), 'CSTRING(32767)') # c = f.returns # - self.assertEqual(c.position,0) - self.assertEqual(c.mechanism,0) - self.assertEqual(c.field_type,8) - self.assertEqual(c.length,4) - self.assertEqual(c.scale,0) - self.assertEqual(c.precision,0) - self.assertEqual(c.sub_type,0) + self.assertEqual(c.position, 0) + self.assertEqual(c.mechanism, 0) + self.assertEqual(c.field_type, 8) + self.assertEqual(c.length, 4) + self.assertEqual(c.scale, 0) + self.assertEqual(c.precision, 0) + self.assertEqual(c.sub_type, 0) self.assertIsNone(c.character_length) self.assertIsNone(c.character_set) - self.assertEqual(c.datatype,'INTEGER') + self.assertEqual(c.datatype, 'INTEGER') # self.assertTrue(c.isbyvalue()) self.assertFalse(c.isbyreference()) @@ -4038,22 +4186,22 @@ self.assertFalse(c.iswithnull()) self.assertFalse(c.isfreeit()) self.assertTrue(c.isreturning()) - self.assertEqual(c.get_sql_definition(),'INTEGER BY VALUE') + self.assertEqual(c.get_sql_definition(), 'INTEGER BY VALUE') # f = self._mockFunction('STRING2BLOB') - self.assertEqual(len(f.arguments),2) + self.assertEqual(len(f.arguments), 2) c = f.arguments[0] - self.assertEqual(c.function.name,'STRING2BLOB') - self.assertEqual(c.position,1) - self.assertEqual(c.mechanism,2) - self.assertEqual(c.field_type,37) - self.assertEqual(c.length,300) - self.assertEqual(c.scale,0) + self.assertEqual(c.function.name, 'STRING2BLOB') + self.assertEqual(c.position, 1) + self.assertEqual(c.mechanism, 2) + self.assertEqual(c.field_type, 37) + self.assertEqual(c.length, 300) + self.assertEqual(c.scale, 0) self.assertIsNone(c.precision) - self.assertEqual(c.sub_type,0) - self.assertEqual(c.character_length,300) - self.assertEqual(c.character_set.name,'NONE') - self.assertEqual(c.datatype,'VARCHAR(300)') + self.assertEqual(c.sub_type, 0) + self.assertEqual(c.character_length, 300) + self.assertEqual(c.character_set.name, 'NONE') + self.assertEqual(c.datatype, 'VARCHAR(300)') 
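[Editor's note — illustration, not part of the diff] The _mockFunction() helper builds sm.Function objects from raw RDB$ rows because the employee example database declares no UDFs to introspect. Against a database that does declare them, the same attributes asserted above are available directly; a sketch, with `con` assumed to be an open fdb connection to such a database:

    fn = con.schema.get_function('STRLEN')
    for arg in fn.arguments:
        # position, datatype and the passing convention asserted above
        print(arg.position, arg.datatype, arg.get_sql_definition())
    # The return value carries the same information:
    print('returns:', fn.returns.get_sql_definition())  # INTEGER BY VALUE for STRLEN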
# self.assertFalse(c.isbyvalue()) self.assertFalse(c.isbyreference()) @@ -4061,21 +4209,21 @@ self.assertFalse(c.iswithnull()) self.assertFalse(c.isfreeit()) self.assertFalse(c.isreturning()) - self.assertEqual(c.get_sql_definition(),'VARCHAR(300) BY DESCRIPTOR') + self.assertEqual(c.get_sql_definition(), 'VARCHAR(300) BY DESCRIPTOR') # c = f.arguments[1] - self.assertIs(f.arguments[1],f.returns) - self.assertEqual(c.function.name,'STRING2BLOB') - self.assertEqual(c.position,2) - self.assertEqual(c.mechanism,3) - self.assertEqual(c.field_type,261) - self.assertEqual(c.length,8) - self.assertEqual(c.scale,0) + self.assertIs(f.arguments[1], f.returns) + self.assertEqual(c.function.name, 'STRING2BLOB') + self.assertEqual(c.position, 2) + self.assertEqual(c.mechanism, 3) + self.assertEqual(c.field_type, 261) + self.assertEqual(c.length, 8) + self.assertEqual(c.scale, 0) self.assertIsNone(c.precision) - self.assertEqual(c.sub_type,0) + self.assertEqual(c.sub_type, 0) self.assertIsNone(c.character_length) self.assertIsNone(c.character_set) - self.assertEqual(c.datatype,'BLOB') + self.assertEqual(c.datatype, 'BLOB') # self.assertFalse(c.isbyvalue()) self.assertFalse(c.isbyreference()) @@ -4084,22 +4232,22 @@ self.assertFalse(c.iswithnull()) self.assertFalse(c.isfreeit()) self.assertTrue(c.isreturning()) - self.assertEqual(c.get_sql_definition(),'BLOB') + self.assertEqual(c.get_sql_definition(), 'BLOB') # f = self._mockFunction('LTRIM') - self.assertEqual(len(f.arguments),1) + self.assertEqual(len(f.arguments), 1) c = f.arguments[0] - self.assertEqual(c.function.name,'LTRIM') - self.assertEqual(c.position,1) - self.assertEqual(c.mechanism,1) - self.assertEqual(c.field_type,40) - self.assertEqual(c.length,255) - self.assertEqual(c.scale,0) + self.assertEqual(c.function.name, 'LTRIM') + self.assertEqual(c.position, 1) + self.assertEqual(c.mechanism, 1) + self.assertEqual(c.field_type, 40) + self.assertEqual(c.length, 255) + self.assertEqual(c.scale, 0) self.assertIsNone(c.precision) - self.assertEqual(c.sub_type,0) - self.assertEqual(c.character_length,255) - self.assertEqual(c.character_set.name,'NONE') - self.assertEqual(c.datatype,'CSTRING(255)') + self.assertEqual(c.sub_type, 0) + self.assertEqual(c.character_length, 255) + self.assertEqual(c.character_set.name, 'NONE') + self.assertEqual(c.datatype, 'CSTRING(255)') # self.assertFalse(c.isbyvalue()) self.assertTrue(c.isbyreference()) @@ -4107,20 +4255,20 @@ self.assertFalse(c.iswithnull()) self.assertFalse(c.isfreeit()) self.assertFalse(c.isreturning()) - self.assertEqual(c.get_sql_definition(),'CSTRING(255)') + self.assertEqual(c.get_sql_definition(), 'CSTRING(255)') # c = f.returns - self.assertEqual(c.function.name,'LTRIM') - self.assertEqual(c.position,0) - self.assertEqual(c.mechanism,1) - self.assertEqual(c.field_type,40) - self.assertEqual(c.length,255) - self.assertEqual(c.scale,0) + self.assertEqual(c.function.name, 'LTRIM') + self.assertEqual(c.position, 0) + self.assertEqual(c.mechanism, 1) + self.assertEqual(c.field_type, 40) + self.assertEqual(c.length, 255) + self.assertEqual(c.scale, 0) self.assertIsNone(c.precision) - self.assertEqual(c.sub_type,0) - self.assertEqual(c.character_length,255) - self.assertEqual(c.character_set.name,'NONE') - self.assertEqual(c.datatype,'CSTRING(255)') + self.assertEqual(c.sub_type, 0) + self.assertEqual(c.character_length, 255) + self.assertEqual(c.character_set.name, 'NONE') + self.assertEqual(c.datatype, 'CSTRING(255)') # self.assertFalse(c.isbyvalue()) self.assertTrue(c.isbyreference()) @@ 
-4129,24 +4277,24 @@ self.assertFalse(c.iswithnull()) self.assertTrue(c.isfreeit()) self.assertTrue(c.isreturning()) - self.assertEqual(c.get_sql_definition(),'CSTRING(255)') + self.assertEqual(c.get_sql_definition(), 'CSTRING(255)') # f = self._mockFunction('I64NVL') - self.assertEqual(len(f.arguments),2) + self.assertEqual(len(f.arguments), 2) for a in f.arguments: - self.assertEqual(a.datatype,'NUMERIC(18, 0)') + self.assertEqual(a.datatype, 'NUMERIC(18, 0)') self.assertTrue(a.isbydescriptor()) self.assertEqual(a.get_sql_definition(), 'NUMERIC(18, 0) BY DESCRIPTOR') - self.assertEqual(f.returns.datatype,'NUMERIC(18, 0)') + self.assertEqual(f.returns.datatype, 'NUMERIC(18, 0)') self.assertTrue(f.returns.isbydescriptor()) self.assertEqual(f.returns.get_sql_definition(), 'NUMERIC(18, 0) BY DESCRIPTOR') def testFunction(self): c = self._mockFunction('STRLEN') - self.assertEqual(len(c.arguments),1) + self.assertEqual(len(c.arguments), 1) # common properties - self.assertEqual(c.name,'STRLEN') + self.assertEqual(c.name, 'STRLEN') self.assertIsNone(c.description) self.assertIsNone(c.package) self.assertIsNone(c.engine_mame) @@ -4158,47 +4306,49 @@ self.assertIsNone(c.owner_name) self.assertIsNone(c.legacy_flag) self.assertIsNone(c.deterministic_flag) - self.assertListEqual(c.actions,['declare', 'drop']) + self.assertListEqual(c.actions, ['comment', 'declare', 'drop']) self.assertFalse(c.issystemobject()) - self.assertEqual(c.get_quoted_name(),'STRLEN') - self.assertListEqual(c.get_dependents(),[]) - self.assertListEqual(c.get_dependencies(),[]) + self.assertEqual(c.get_quoted_name(), 'STRLEN') + self.assertListEqual(c.get_dependents(), []) + self.assertListEqual(c.get_dependencies(), []) self.assertFalse(c.ispackaged()) # - self.assertEqual(c.module_name,'ib_udf') - self.assertEqual(c.entrypoint,'IB_UDF_strlen') - self.assertEqual(c.returns.name,'STRLEN_0') - self.assertListEqual([a.name for a in c.arguments],['STRLEN_1']) + self.assertEqual(c.module_name, 'ib_udf') + self.assertEqual(c.entrypoint, 'IB_UDF_strlen') + self.assertEqual(c.returns.name, 'STRLEN_0') + self.assertListEqual([a.name for a in c.arguments], ['STRLEN_1']) # self.assertTrue(c.has_arguments()) self.assertTrue(c.has_return()) self.assertFalse(c.has_return_argument()) # - self.assertEqual(c.get_sql_for('drop'),"DROP EXTERNAL FUNCTION STRLEN") + self.assertEqual(c.get_sql_for('drop'), "DROP EXTERNAL FUNCTION STRLEN") with self.assertRaises(fdb.ProgrammingError) as cm: - c.get_sql_for('drop',badparam='') + c.get_sql_for('drop', badparam='') self.assertTupleEqual(cm.exception.args, - ("Unsupported parameter(s) 'badparam'",)) + ("Unsupported parameter(s) 'badparam'",)) self.assertEqual(c.get_sql_for('declare'), -"""DECLARE EXTERNAL FUNCTION STRLEN + """DECLARE EXTERNAL FUNCTION STRLEN CSTRING(32767) RETURNS INTEGER BY VALUE ENTRY_POINT 'IB_UDF_strlen' MODULE_NAME 'ib_udf'""") with self.assertRaises(fdb.ProgrammingError) as cm: - c.get_sql_for('declare',badparam='') + c.get_sql_for('declare', badparam='') self.assertTupleEqual(cm.exception.args, - ("Unsupported parameter(s) 'badparam'",)) + ("Unsupported parameter(s) 'badparam'",)) + self.assertEqual(c.get_sql_for('comment'), + 'COMMENT ON EXTERNAL FUNCTION STRLEN IS NULL') # c = self._mockFunction('STRING2BLOB') - self.assertEqual(len(c.arguments),2) + self.assertEqual(len(c.arguments), 2) # self.assertTrue(c.has_arguments()) self.assertTrue(c.has_return()) self.assertTrue(c.has_return_argument()) # self.assertEqual(c.get_sql_for('declare'), -"""DECLARE EXTERNAL FUNCTION 
STRING2BLOB + """DECLARE EXTERNAL FUNCTION STRING2BLOB VARCHAR(300) BY DESCRIPTOR, BLOB RETURNS PARAMETER 2 @@ -4206,28 +4356,28 @@ MODULE_NAME 'fbudf'""") # c = self._mockFunction('LTRIM') - self.assertEqual(len(c.arguments),1) + self.assertEqual(len(c.arguments), 1) # self.assertTrue(c.has_arguments()) self.assertTrue(c.has_return()) self.assertFalse(c.has_return_argument()) # self.assertEqual(c.get_sql_for('declare'), -"""DECLARE EXTERNAL FUNCTION LTRIM + """DECLARE EXTERNAL FUNCTION LTRIM CSTRING(255) RETURNS CSTRING(255) FREE_IT ENTRY_POINT 'IB_UDF_ltrim' MODULE_NAME 'ib_udf'""") # c = self._mockFunction('I64NVL') - self.assertEqual(len(c.arguments),2) + self.assertEqual(len(c.arguments), 2) # self.assertTrue(c.has_arguments()) self.assertTrue(c.has_return()) self.assertFalse(c.has_return_argument()) # self.assertEqual(c.get_sql_for('declare'), -"""DECLARE EXTERNAL FUNCTION I64NVL + """DECLARE EXTERNAL FUNCTION I64NVL NUMERIC(18, 0) BY DESCRIPTOR, NUMERIC(18, 0) BY DESCRIPTOR RETURNS NUMERIC(18, 0) BY DESCRIPTOR @@ -4238,201 +4388,214 @@ if self.con.ods >= fdb.ODS_FB_30: c = self.con.schema.get_function('F2') # common properties - self.assertEqual(c.name,'F2') + self.assertEqual(c.name, 'F2') self.assertIsNone(c.description) self.assertIsNone(c.package) self.assertIsNone(c.engine_mame) self.assertIsNone(c.private_flag) - self.assertEqual(c.source,'BEGIN\n RETURN X+1;\nEND') - self.assertEqual(c.id,3) + self.assertEqual(c.source, 'BEGIN\n RETURN X+1;\nEND') + self.assertEqual(c.id, 3) self.assertTrue(c.valid_blr) - self.assertEqual(c.security_class,'SQL$588') - self.assertEqual(c.owner_name,'SYSDBA') - self.assertEqual(c.legacy_flag,0) - self.assertEqual(c.deterministic_flag,0) + self.assertEqual(c.security_class, 'SQL$588') + self.assertEqual(c.owner_name, 'SYSDBA') + self.assertEqual(c.legacy_flag, 0) + self.assertEqual(c.deterministic_flag, 0) # - self.assertListEqual(c.actions,['create','recreate','alter','create_or_alter','drop']) + self.assertListEqual(c.actions, ['create', 'recreate', 'alter', 'create_or_alter', 'drop']) self.assertFalse(c.issystemobject()) - self.assertEqual(c.get_quoted_name(),'F2') - self.assertListEqual(c.get_dependents(),[]) - self.assertListEqual(c.get_dependencies(),[]) + self.assertEqual(c.get_quoted_name(), 'F2') + self.assertListEqual(c.get_dependents(), []) + self.assertListEqual(c.get_dependencies(), []) # self.assertIsNone(c.module_name) self.assertIsNone(c.entrypoint) - self.assertEqual(c.returns.name,'F2_0') - self.assertListEqual([a.name for a in c.arguments],['X']) + self.assertEqual(c.returns.name, 'F2_0') + self.assertListEqual([a.name for a in c.arguments], ['X']) # self.assertTrue(c.has_arguments()) self.assertTrue(c.has_return()) self.assertFalse(c.has_return_argument()) self.assertFalse(c.ispackaged()) # - self.assertEqual(c.get_sql_for('drop'),"DROP FUNCTION F2") + self.assertEqual(c.get_sql_for('drop'), "DROP FUNCTION F2") self.assertEqual(c.get_sql_for('create'), - """CREATE FUNCTION F2 (X INTEGER) - RETURNS INTEGER - AS - BEGIN - RETURN X+1; - END""") + """CREATE FUNCTION F2 (X INTEGER) +RETURNS INTEGER +AS +BEGIN + RETURN X+1; +END""") + self.assertEqual(c.get_sql_for('create', no_code=True), + """CREATE FUNCTION F2 (X INTEGER) +RETURNS INTEGER +AS +BEGIN +END""") self.assertEqual(c.get_sql_for('recreate'), - """RECREATE FUNCTION F2 (X INTEGER) - RETURNS INTEGER - AS - BEGIN - RETURN X+1; - END""") + """RECREATE FUNCTION F2 (X INTEGER) +RETURNS INTEGER +AS +BEGIN + RETURN X+1; +END""") 
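[Editor's note — illustration, not part of the diff] From Firebird 3 on (self.con.ods >= fdb.ODS_FB_30), get_function() also returns PSQL stored functions, and get_sql_for() gains the no_code switch exercised above, which emits the header with an empty body. A sketch against a database containing the F2 fixture, with `con` assumed to be an open fdb connection:

    fn = con.schema.get_function('F2')
    print(fn.get_sql_for('create', no_code=True))
    # CREATE FUNCTION F2 (X INTEGER)
    # RETURNS INTEGER
    # AS
    # BEGIN
    # END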
self.assertEqual(c.get_sql_for('create_or_alter'), - """CREATE OR ALTER FUNCTION F2 (X INTEGER) - RETURNS INTEGER - AS - BEGIN - RETURN X+1; - END""") + """CREATE OR ALTER FUNCTION F2 (X INTEGER) +RETURNS INTEGER +AS +BEGIN + RETURN X+1; +END""") with self.assertRaises(fdb.ProgrammingError) as cm: - c.get_sql_for('alter',declare="DECLARE VARIABLE i integer;",code='') + c.get_sql_for('alter', declare="DECLARE VARIABLE i integer;", code='') self.assertTupleEqual(cm.exception.args, - ("Missing required parameter: 'returns'.",)) + ("Missing required parameter: 'returns'.",)) with self.assertRaises(fdb.ProgrammingError) as cm: - c.get_sql_for('alter',declare="DECLARE VARIABLE i integer;",returns='INTEGER') + c.get_sql_for('alter', declare="DECLARE VARIABLE i integer;", returns='INTEGER') self.assertTupleEqual(cm.exception.args, - ("Missing required parameter: 'code'.",)) - self.assertEqual(c.get_sql_for('alter',returns='INTEGER',code=''), - """ALTER FUNCTION F2 - RETURNS INTEGER - AS - BEGIN - END""") - self.assertEqual(c.get_sql_for('alter',arguments="IN1 integer",returns='INTEGER',code=''), - """ALTER FUNCTION F2 (IN1 integer) - RETURNS INTEGER - AS - BEGIN - END""") - self.assertEqual(c.get_sql_for('alter',returns='INTEGER', - arguments=["IN1 integer","IN2 VARCHAR(10)"], + ("Missing required parameter: 'code'.",)) + self.assertEqual(c.get_sql_for('alter', returns='INTEGER', code=''), + """ALTER FUNCTION F2 +RETURNS INTEGER +AS +BEGIN +END""") + self.assertEqual(c.get_sql_for('alter', arguments="IN1 integer", returns='INTEGER', code=''), + """ALTER FUNCTION F2 (IN1 integer) +RETURNS INTEGER +AS +BEGIN +END""") + self.assertEqual(c.get_sql_for('alter', returns='INTEGER', + arguments=["IN1 integer", "IN2 VARCHAR(10)"], code=''), - """ALTER FUNCTION F2 ( - IN1 integer, - IN2 VARCHAR(10) - ) - RETURNS INTEGER - AS - BEGIN - END""") + """ALTER FUNCTION F2 ( + IN1 integer, + IN2 VARCHAR(10) +) +RETURNS INTEGER +AS +BEGIN +END""") # c = self.con.schema.get_function('FX') - self.assertEqual(c.get_sql_for('create'), - """CREATE FUNCTION FX ( - F TYPE OF "FIRSTNAME", - L TYPE OF COLUMN CUSTOMER.CONTACT_LAST - ) - RETURNS VARCHAR(35) - AS - BEGIN - RETURN L || ', ' || F; - END""") + self.assertEqual(c.get_sql_for('create'),"""CREATE FUNCTION FX ( + F TYPE OF "FIRSTNAME", + L TYPE OF COLUMN CUSTOMER.CONTACT_LAST +) +RETURNS VARCHAR(35) +AS +BEGIN + RETURN L || \', \' || F; +END""") + #"""CREATE FUNCTION FX ( + #L TYPE OF COLUMN CUSTOMER.CONTACT_LAST +#) +#RETURNS VARCHAR(35) +#AS +#BEGIN + #RETURN L || ', ' || F; +#END""") # c = self.con.schema.get_function('F1') - self.assertEqual(c.name,'F1') + self.assertEqual(c.name, 'F1') self.assertIsNotNone(c.package) - self.assertIsInstance(c.package,sm.Package) - self.assertListEqual(c.actions,[]) + self.assertIsInstance(c.package, sm.Package) + self.assertListEqual(c.actions, []) self.assertTrue(c.private_flag) self.assertTrue(c.ispackaged()) def testDatabaseFile(self): # We have to use mock - c = sm.DatabaseFile(self.con.schema,{'RDB$FILE_LENGTH': 1000, - 'RDB$FILE_NAME': '/path/dbfile.f02', - 'RDB$FILE_START': 500, - 'RDB$FILE_SEQUENCE': 1}) + c = sm.DatabaseFile(self.con.schema, {'RDB$FILE_LENGTH': 1000, + 'RDB$FILE_NAME': '/path/dbfile.f02', + 'RDB$FILE_START': 500, + 'RDB$FILE_SEQUENCE': 1}) # common properties - self.assertEqual(c.name,'FILE_1') + self.assertEqual(c.name, 'FILE_1') self.assertIsNone(c.description) - self.assertListEqual(c.actions,[]) + self.assertListEqual(c.actions, []) self.assertTrue(c.issystemobject()) - 
self.assertEqual(c.get_quoted_name(),'FILE_1') - self.assertListEqual(c.get_dependents(),[]) - self.assertListEqual(c.get_dependencies(),[]) - # - self.assertEqual(c.filename,'/path/dbfile.f02') - self.assertEqual(c.sequence,1) - self.assertEqual(c.start,500) - self.assertEqual(c.length,1000) + self.assertEqual(c.get_quoted_name(), 'FILE_1') + self.assertListEqual(c.get_dependents(), []) + self.assertListEqual(c.get_dependencies(), []) + # + self.assertEqual(c.filename, '/path/dbfile.f02') + self.assertEqual(c.sequence, 1) + self.assertEqual(c.start, 500) + self.assertEqual(c.length, 1000) # def testShadow(self): # We have to use mocks - c = sm.Shadow(self.con.schema,{'RDB$FILE_FLAGS': 1, - 'RDB$SHADOW_NUMBER': 3}) + c = sm.Shadow(self.con.schema, {'RDB$FILE_FLAGS': 1, + 'RDB$SHADOW_NUMBER': 3}) files = [] - files.append(sm.DatabaseFile(self.con.schema,{'RDB$FILE_LENGTH': 500, - 'RDB$FILE_NAME': '/path/shadow.sf1', - 'RDB$FILE_START': 0, - 'RDB$FILE_SEQUENCE': 0})) - files.append(sm.DatabaseFile(self.con.schema,{'RDB$FILE_LENGTH': 500, - 'RDB$FILE_NAME': '/path/shadow.sf2', - 'RDB$FILE_START': 1000, - 'RDB$FILE_SEQUENCE': 1})) - files.append(sm.DatabaseFile(self.con.schema,{'RDB$FILE_LENGTH': 0, - 'RDB$FILE_NAME': '/path/shadow.sf3', - 'RDB$FILE_START': 1500, - 'RDB$FILE_SEQUENCE': 2})) + files.append(sm.DatabaseFile(self.con.schema, {'RDB$FILE_LENGTH': 500, + 'RDB$FILE_NAME': '/path/shadow.sf1', + 'RDB$FILE_START': 0, + 'RDB$FILE_SEQUENCE': 0})) + files.append(sm.DatabaseFile(self.con.schema, {'RDB$FILE_LENGTH': 500, + 'RDB$FILE_NAME': '/path/shadow.sf2', + 'RDB$FILE_START': 1000, + 'RDB$FILE_SEQUENCE': 1})) + files.append(sm.DatabaseFile(self.con.schema, {'RDB$FILE_LENGTH': 0, + 'RDB$FILE_NAME': '/path/shadow.sf3', + 'RDB$FILE_START': 1500, + 'RDB$FILE_SEQUENCE': 2})) c.__dict__['_Shadow__files'] = files # common properties - self.assertEqual(c.name,'SHADOW_3') + self.assertEqual(c.name, 'SHADOW_3') self.assertIsNone(c.description) - self.assertListEqual(c.actions,['create', 'drop']) + self.assertListEqual(c.actions, ['create', 'drop']) self.assertFalse(c.issystemobject()) - self.assertEqual(c.get_quoted_name(),'SHADOW_3') - self.assertListEqual(c.get_dependents(),[]) - self.assertListEqual(c.get_dependencies(),[]) - # - self.assertEqual(c.id,3) - self.assertEqual(c.flags,1) - self.assertListEqual([(f.name,f.filename,f.start,f.length) for f in c.files], - [('FILE_0', '/path/shadow.sf1', 0, 500), - ('FILE_1', '/path/shadow.sf2', 1000, 500), - ('FILE_2', '/path/shadow.sf3', 1500, 0)]) + self.assertEqual(c.get_quoted_name(), 'SHADOW_3') + self.assertListEqual(c.get_dependents(), []) + self.assertListEqual(c.get_dependencies(), []) + # + self.assertEqual(c.id, 3) + self.assertEqual(c.flags, 1) + self.assertListEqual([(f.name, f.filename, f.start, f.length) for f in c.files], + [('FILE_0', '/path/shadow.sf1', 0, 500), + ('FILE_1', '/path/shadow.sf2', 1000, 500), + ('FILE_2', '/path/shadow.sf3', 1500, 0)]) # self.assertFalse(c.isconditional()) self.assertFalse(c.isinactive()) self.assertFalse(c.ismanual()) # self.assertEqual(c.get_sql_for('create'), -"""CREATE SHADOW 3 AUTO '/path/shadow.sf1' LENGTH 500 + """CREATE SHADOW 3 AUTO '/path/shadow.sf1' LENGTH 500 FILE '/path/shadow.sf2' STARTING AT 1000 LENGTH 500 FILE '/path/shadow.sf3' STARTING AT 1500""") - self.assertEqual(c.get_sql_for('drop'),"DROP SHADOW 3") - self.assertEqual(c.get_sql_for('drop',preserve=True),"DROP SHADOW 3 PRESERVE FILE") + self.assertEqual(c.get_sql_for('drop'), "DROP SHADOW 3") + self.assertEqual(c.get_sql_for('drop', 
preserve=True), "DROP SHADOW 3 PRESERVE FILE") def testPrivilegeBasic(self): p = self.con.schema.get_procedure('ALL_LANGS') # - self.assertIsInstance(p.privileges,list) - self.assertEqual(len(p.privileges),2) + self.assertIsInstance(p.privileges, list) + self.assertEqual(len(p.privileges), 2) c = p.privileges[0] # common properties self.assertIsNone(c.name) self.assertIsNone(c.description) - self.assertListEqual(c.actions,['grant', 'revoke']) + self.assertListEqual(c.actions, ['grant', 'revoke']) self.assertTrue(c.issystemobject()) self.assertIsNone(c.get_quoted_name()) - self.assertListEqual(c.get_dependents(),[]) - self.assertListEqual(c.get_dependencies(),[]) + self.assertListEqual(c.get_dependents(), []) + self.assertListEqual(c.get_dependencies(), []) # - self.assertIsInstance(c.user,fdb.services.User) - self.assertIn(c.user.name,['SYSDBA','PUBLIC']) - self.assertIsInstance(c.grantor,fdb.services.User) - self.assertEqual(c.grantor.name,'SYSDBA') - self.assertEqual(c.privilege,'X') - self.assertIsInstance(c.subject,sm.Procedure) - self.assertEqual(c.subject.name,'ALL_LANGS') - self.assertIn(c.user_name,['SYSDBA','PUBLIC']) - self.assertEqual(c.user_type,self.con.schema.enum_object_type_codes['USER']) - self.assertEqual(c.grantor_name,'SYSDBA') - self.assertEqual(c.subject_name,'ALL_LANGS') - self.assertEqual(c.subject_type,self.con.schema.enum_object_type_codes['PROCEDURE']) + self.assertIsInstance(c.user, fdb.services.User) + self.assertIn(c.user.name, ['SYSDBA', 'PUBLIC']) + self.assertIsInstance(c.grantor, fdb.services.User) + self.assertEqual(c.grantor.name, 'SYSDBA') + self.assertEqual(c.privilege, 'X') + self.assertIsInstance(c.subject, sm.Procedure) + self.assertEqual(c.subject.name, 'ALL_LANGS') + self.assertIn(c.user_name, ['SYSDBA', 'PUBLIC']) + self.assertEqual(c.user_type, self.con.schema.enum_object_type_codes['USER']) + self.assertEqual(c.grantor_name, 'SYSDBA') + self.assertEqual(c.subject_name, 'ALL_LANGS') + self.assertEqual(c.subject_type, self.con.schema.enum_object_type_codes['PROCEDURE']) self.assertIsNone(c.field_name) # self.assertFalse(c.has_grant()) @@ -4445,737 +4608,737 @@ self.assertFalse(c.ismembership()) # self.assertEqual(c.get_sql_for('grant'), - "GRANT EXECUTE ON PROCEDURE ALL_LANGS TO SYSDBA") - self.assertEqual(c.get_sql_for('grant',grantors=[]), - "GRANT EXECUTE ON PROCEDURE ALL_LANGS TO SYSDBA GRANTED BY SYSDBA") - self.assertEqual(c.get_sql_for('grant',grantors=['SYSDBA','TEST_USER']), - "GRANT EXECUTE ON PROCEDURE ALL_LANGS TO SYSDBA") + "GRANT EXECUTE ON PROCEDURE ALL_LANGS TO SYSDBA") + self.assertEqual(c.get_sql_for('grant', grantors=[]), + "GRANT EXECUTE ON PROCEDURE ALL_LANGS TO SYSDBA GRANTED BY SYSDBA") + self.assertEqual(c.get_sql_for('grant', grantors=['SYSDBA', 'TEST_USER']), + "GRANT EXECUTE ON PROCEDURE ALL_LANGS TO SYSDBA") with self.assertRaises(fdb.ProgrammingError) as cm: - c.get_sql_for('grant',badparam=True) + c.get_sql_for('grant', badparam=True) self.assertTupleEqual(cm.exception.args, - ("Unsupported parameter(s) 'badparam'",)) + ("Unsupported parameter(s) 'badparam'",)) self.assertEqual(c.get_sql_for('revoke'), - "REVOKE EXECUTE ON PROCEDURE ALL_LANGS FROM SYSDBA") - self.assertEqual(c.get_sql_for('revoke',grantors=[]), - "REVOKE EXECUTE ON PROCEDURE ALL_LANGS FROM SYSDBA GRANTED BY SYSDBA") - self.assertEqual(c.get_sql_for('revoke',grantors=['SYSDBA','TEST_USER']), - "REVOKE EXECUTE ON PROCEDURE ALL_LANGS FROM SYSDBA") + "REVOKE EXECUTE ON PROCEDURE ALL_LANGS FROM SYSDBA") + self.assertEqual(c.get_sql_for('revoke', 
grantors=[]), + "REVOKE EXECUTE ON PROCEDURE ALL_LANGS FROM SYSDBA GRANTED BY SYSDBA") + self.assertEqual(c.get_sql_for('revoke', grantors=['SYSDBA', 'TEST_USER']), + "REVOKE EXECUTE ON PROCEDURE ALL_LANGS FROM SYSDBA") with self.assertRaises(fdb.ProgrammingError) as cm: - c.get_sql_for('revoke',grant_option=True) + c.get_sql_for('revoke', grant_option=True) self.assertTupleEqual(cm.exception.args, - ("Can't revoke grant option that wasn't granted.",)) + ("Can't revoke grant option that wasn't granted.",)) with self.assertRaises(fdb.ProgrammingError) as cm: - c.get_sql_for('revoke',badparam=True) + c.get_sql_for('revoke', badparam=True) self.assertTupleEqual(cm.exception.args, - ("Unsupported parameter(s) 'badparam'",)) + ("Unsupported parameter(s) 'badparam'",)) c = p.privileges[1] self.assertEqual(c.get_sql_for('grant'), - "GRANT EXECUTE ON PROCEDURE ALL_LANGS TO PUBLIC WITH GRANT OPTION") + "GRANT EXECUTE ON PROCEDURE ALL_LANGS TO PUBLIC WITH GRANT OPTION") self.assertEqual(c.get_sql_for('revoke'), - "REVOKE EXECUTE ON PROCEDURE ALL_LANGS FROM PUBLIC") - self.assertEqual(c.get_sql_for('revoke',grant_option=True), - "REVOKE GRANT OPTION FOR EXECUTE ON PROCEDURE ALL_LANGS FROM PUBLIC") + "REVOKE EXECUTE ON PROCEDURE ALL_LANGS FROM PUBLIC") + self.assertEqual(c.get_sql_for('revoke', grant_option=True), + "REVOKE GRANT OPTION FOR EXECUTE ON PROCEDURE ALL_LANGS FROM PUBLIC") # get_privileges_of() u = fdb.services.User('PUBLIC') p = self.con.schema.get_privileges_of(u) if self.con.ods <= fdb.ODS_FB_20: - self.assertEqual(len(p),66) + self.assertEqual(len(p), 66) elif self.con.ods >= fdb.ODS_FB_30: - self.assertEqual(len(p),115) + self.assertEqual(len(p), 115) else: - self.assertEqual(len(p),68) + self.assertEqual(len(p), 68) with self.assertRaises(fdb.ProgrammingError) as cm: p = self.con.schema.get_privileges_of('PUBLIC') self.assertTupleEqual(cm.exception.args, - ("Unknown user_type code.",)) + ("Unknown user_type code.",)) with self.assertRaises(fdb.ProgrammingError) as cm: - p = self.con.schema.get_privileges_of('PUBLIC',50) + p = self.con.schema.get_privileges_of('PUBLIC', 50) self.assertTupleEqual(cm.exception.args, - ("Unknown user_type code.",)) + ("Unknown user_type code.",)) # def testPrivilegeExtended(self): - def get_privilege(obj,privilege): + def get_privilege(obj, privilege): x = [x for x in obj.privileges if x.privilege == privilege] return x[0] - p = [] - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'X', - 'RDB$RELATION_NAME': 'ALL_LANGS', - 'RDB$OBJECT_TYPE': 5, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': None})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'X', - 'RDB$RELATION_NAME': 'ALL_LANGS', - 'RDB$OBJECT_TYPE': 5, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'T_USER', - 'RDB$PRIVILEGE': 'X', - 'RDB$RELATION_NAME': 'ALL_LANGS', - 'RDB$OBJECT_TYPE': 5, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'TEST_ROLE', - 'RDB$PRIVILEGE': 'X', - 'RDB$RELATION_NAME': 'ALL_LANGS', - 'RDB$OBJECT_TYPE': 5, - 'RDB$USER_TYPE': 13, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'X', - 'RDB$RELATION_NAME': 'ALL_LANGS', - 
'RDB$OBJECT_TYPE': 5, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'T_USER', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'I', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'D', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'I', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'T_USER', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': 'CURRENCY', - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'T_USER', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': 'COUNTRY', - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'T_USER', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'T_USER', - 'RDB$PRIVILEGE': 'I', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'T_USER', - 'RDB$PRIVILEGE': 'D', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'T_USER', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 
'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'T_USER', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'T_USER', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': 'COUNTRY', - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'T_USER', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': 'CURRENCY', - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'D', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 'COUNTRY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'I', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'D', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'I', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'D', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - 
p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'ORG_CHART', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 5, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'I', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'D', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'I', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'D', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'ORG_CHART', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 5, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'X', - 'RDB$RELATION_NAME': 'ORG_CHART', - 'RDB$OBJECT_TYPE': 5, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': None})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 
'RDB$PRIVILEGE': 'X', - 'RDB$RELATION_NAME': 'ORG_CHART', - 'RDB$OBJECT_TYPE': 5, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'PHONE_LIST', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'I', - 'RDB$RELATION_NAME': 'PHONE_LIST', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 'PHONE_LIST', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'D', - 'RDB$RELATION_NAME': 'PHONE_LIST', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'PHONE_LIST', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'PHONE_LIST', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'I', - 'RDB$RELATION_NAME': 'PHONE_LIST', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 'PHONE_LIST', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'D', - 'RDB$RELATION_NAME': 'PHONE_LIST', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'PHONE_LIST', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'PHONE_LIST', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': 'EMP_NO', - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'RDB$PAGES', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'I', - 'RDB$RELATION_NAME': 'RDB$PAGES', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'U', - 'RDB$RELATION_NAME': 
'RDB$PAGES', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'D', - 'RDB$RELATION_NAME': 'RDB$PAGES', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'R', - 'RDB$RELATION_NAME': 'RDB$PAGES', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'RDB$PAGES', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SYSDBA', - 'RDB$PRIVILEGE': 'X', - 'RDB$RELATION_NAME': 'SHIP_ORDER', - 'RDB$OBJECT_TYPE': 5, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': None})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PUBLIC', - 'RDB$PRIVILEGE': 'X', - 'RDB$RELATION_NAME': 'SHIP_ORDER', - 'RDB$OBJECT_TYPE': 5, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 1})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'T_USER', - 'RDB$PRIVILEGE': 'M', - 'RDB$RELATION_NAME': 'TEST_ROLE', - 'RDB$OBJECT_TYPE': 13, - 'RDB$USER_TYPE': 8, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'SAVE_SALARY_CHANGE', - 'RDB$PRIVILEGE': 'I', - 'RDB$RELATION_NAME': 'SALARY_HISTORY', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 2, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PHONE_LIST', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'DEPARTMENT', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 1, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) - p.append(sm.Privilege(self.con.schema,{'RDB$USER': 'PHONE_LIST', - 'RDB$PRIVILEGE': 'S', - 'RDB$RELATION_NAME': 'EMPLOYEE', - 'RDB$OBJECT_TYPE': 0, - 'RDB$USER_TYPE': 1, - 'RDB$FIELD_NAME': None, - 'RDB$GRANTOR': 'SYSDBA', - 'RDB$GRANT_OPTION': 0})) + p = utils.ObjectList() + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'X', + 'RDB$RELATION_NAME': 'ALL_LANGS', + 'RDB$OBJECT_TYPE': 5, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': None})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'X', + 'RDB$RELATION_NAME': 'ALL_LANGS', + 'RDB$OBJECT_TYPE': 5, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'T_USER', + 'RDB$PRIVILEGE': 'X', + 'RDB$RELATION_NAME': 'ALL_LANGS', + 'RDB$OBJECT_TYPE': 5, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'TEST_ROLE', + 'RDB$PRIVILEGE': 'X', + 'RDB$RELATION_NAME': 'ALL_LANGS', + 'RDB$OBJECT_TYPE': 5, + 'RDB$USER_TYPE': 13, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'X', + 'RDB$RELATION_NAME': 
'ALL_LANGS', + 'RDB$OBJECT_TYPE': 5, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'T_USER', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'I', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'D', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'I', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'T_USER', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': 'CURRENCY', + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'T_USER', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': 'COUNTRY', + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'T_USER', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'T_USER', + 'RDB$PRIVILEGE': 'I', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'T_USER', + 'RDB$PRIVILEGE': 'D', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'T_USER', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 
'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'T_USER', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'T_USER', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': 'COUNTRY', + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'T_USER', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': 'CURRENCY', + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'D', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'COUNTRY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'I', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'D', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'I', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'D', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 
'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'ORG_CHART', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 5, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'I', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'D', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'I', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'D', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'ORG_CHART', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 5, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'X', + 'RDB$RELATION_NAME': 'ORG_CHART', + 'RDB$OBJECT_TYPE': 5, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': None})) + 
p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'X', + 'RDB$RELATION_NAME': 'ORG_CHART', + 'RDB$OBJECT_TYPE': 5, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'PHONE_LIST', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'I', + 'RDB$RELATION_NAME': 'PHONE_LIST', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'PHONE_LIST', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'D', + 'RDB$RELATION_NAME': 'PHONE_LIST', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'PHONE_LIST', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'PHONE_LIST', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'I', + 'RDB$RELATION_NAME': 'PHONE_LIST', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'PHONE_LIST', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'D', + 'RDB$RELATION_NAME': 'PHONE_LIST', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'PHONE_LIST', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'PHONE_LIST', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': 'EMP_NO', + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'RDB$PAGES', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'I', + 'RDB$RELATION_NAME': 'RDB$PAGES', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, 
{'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'U', + 'RDB$RELATION_NAME': 'RDB$PAGES', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'D', + 'RDB$RELATION_NAME': 'RDB$PAGES', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'R', + 'RDB$RELATION_NAME': 'RDB$PAGES', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'RDB$PAGES', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SYSDBA', + 'RDB$PRIVILEGE': 'X', + 'RDB$RELATION_NAME': 'SHIP_ORDER', + 'RDB$OBJECT_TYPE': 5, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': None})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PUBLIC', + 'RDB$PRIVILEGE': 'X', + 'RDB$RELATION_NAME': 'SHIP_ORDER', + 'RDB$OBJECT_TYPE': 5, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 1})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'T_USER', + 'RDB$PRIVILEGE': 'M', + 'RDB$RELATION_NAME': 'TEST_ROLE', + 'RDB$OBJECT_TYPE': 13, + 'RDB$USER_TYPE': 8, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'SAVE_SALARY_CHANGE', + 'RDB$PRIVILEGE': 'I', + 'RDB$RELATION_NAME': 'SALARY_HISTORY', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 2, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PHONE_LIST', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'DEPARTMENT', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 1, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) + p.append(sm.Privilege(self.con.schema, {'RDB$USER': 'PHONE_LIST', + 'RDB$PRIVILEGE': 'S', + 'RDB$RELATION_NAME': 'EMPLOYEE', + 'RDB$OBJECT_TYPE': 0, + 'RDB$USER_TYPE': 1, + 'RDB$FIELD_NAME': None, + 'RDB$GRANTOR': 'SYSDBA', + 'RDB$GRANT_OPTION': 0})) # self.con.schema.__dict__['_Schema__privileges'] = p # Table p = self.con.schema.get_table('COUNTRY') - self.assertEqual(len(p.privileges),19) - self.assertEqual(len([x for x in p.privileges if x.user_name == 'SYSDBA']),5) - self.assertEqual(len([x for x in p.privileges if x.user_name == 'PUBLIC']),5) - self.assertEqual(len([x for x in p.privileges if x.user_name == 'T_USER']),9) - # - self.assertTrue(get_privilege(p,'S').isselect()) - self.assertTrue(get_privilege(p,'I').isinsert()) - self.assertTrue(get_privilege(p,'U').isupdate()) - self.assertTrue(get_privilege(p,'D').isdelete()) - self.assertTrue(get_privilege(p,'R').isreference()) + self.assertEqual(len(p.privileges), 19) + self.assertEqual(len([x for x in p.privileges if x.user_name == 'SYSDBA']), 5) + self.assertEqual(len([x for x in p.privileges if x.user_name == 'PUBLIC']), 5) + self.assertEqual(len([x for x in p.privileges if x.user_name == 'T_USER']), 9) + # + self.assertTrue(get_privilege(p, 'S').isselect()) + self.assertTrue(get_privilege(p, 'I').isinsert()) + 
self.assertTrue(get_privilege(p, 'U').isupdate()) + self.assertTrue(get_privilege(p, 'D').isdelete()) + self.assertTrue(get_privilege(p, 'R').isreference()) # x = p.privileges[0] - self.assertIsInstance(x.subject,sm.Table) - self.assertEqual(x.subject.name,p.name) + self.assertIsInstance(x.subject, sm.Table) + self.assertEqual(x.subject.name, p.name) # TableColumn p = p.get_column('CURRENCY') - self.assertEqual(len(p.privileges),2) + self.assertEqual(len(p.privileges), 2) x = p.privileges[0] - self.assertIsInstance(x.subject,sm.Table) - self.assertEqual(x.field_name,p.name) + self.assertIsInstance(x.subject, sm.Table) + self.assertEqual(x.field_name, p.name) # View p = self.con.schema.get_view('PHONE_LIST') - self.assertEqual(len(p.privileges),11) - self.assertEqual(len([x for x in p.privileges if x.user_name == 'SYSDBA']),5) - self.assertEqual(len([x for x in p.privileges if x.user_name == 'PUBLIC']),6) + self.assertEqual(len(p.privileges), 11) + self.assertEqual(len([x for x in p.privileges if x.user_name == 'SYSDBA']), 5) + self.assertEqual(len([x for x in p.privileges if x.user_name == 'PUBLIC']), 6) # x = p.privileges[0] - self.assertIsInstance(x.subject,sm.View) - self.assertEqual(x.subject.name,p.name) + self.assertIsInstance(x.subject, sm.View) + self.assertEqual(x.subject.name, p.name) # ViewColumn p = p.get_column('EMP_NO') - self.assertEqual(len(p.privileges),1) + self.assertEqual(len(p.privileges), 1) x = p.privileges[0] - self.assertIsInstance(x.subject,sm.View) - self.assertEqual(x.field_name,p.name) + self.assertIsInstance(x.subject, sm.View) + self.assertEqual(x.field_name, p.name) # Procedure p = self.con.schema.get_procedure('ORG_CHART') - self.assertEqual(len(p.privileges),2) - self.assertEqual(len([x for x in p.privileges if x.user_name == 'SYSDBA']),1) - self.assertEqual(len([x for x in p.privileges if x.user_name == 'PUBLIC']),1) + self.assertEqual(len(p.privileges), 2) + self.assertEqual(len([x for x in p.privileges if x.user_name == 'SYSDBA']), 1) + self.assertEqual(len([x for x in p.privileges if x.user_name == 'PUBLIC']), 1) # x = p.privileges[0] self.assertFalse(x.has_grant()) - self.assertIsInstance(x.subject,sm.Procedure) - self.assertEqual(x.subject.name,p.name) + self.assertIsInstance(x.subject, sm.Procedure) + self.assertEqual(x.subject.name, p.name) # x = p.privileges[1] self.assertTrue(x.has_grant()) # Role p = self.con.schema.get_role('TEST_ROLE') - self.assertEqual(len(p.privileges),1) + self.assertEqual(len(p.privileges), 1) x = p.privileges[0] - self.assertIsInstance(x.user,sm.Role) - self.assertEqual(x.user.name,p.name) + self.assertIsInstance(x.user, sm.Role) + self.assertEqual(x.user.name, p.name) self.assertTrue(x.isexecute()) # Trigger as grantee p = self.con.schema.get_table('SALARY_HISTORY') x = p.privileges[0] - self.assertIsInstance(x.user,sm.Trigger) - self.assertEqual(x.user.name,'SAVE_SALARY_CHANGE') + self.assertIsInstance(x.user, sm.Trigger) + self.assertEqual(x.user.name, 'SAVE_SALARY_CHANGE') # View as grantee p = self.con.schema.get_view('PHONE_LIST') x = self.con.schema.get_privileges_of(p) - self.assertEqual(len(x),2) + self.assertEqual(len(x), 2) x = x[0] - self.assertIsInstance(x.user,sm.View) - self.assertEqual(x.user.name,'PHONE_LIST') + self.assertIsInstance(x.user, sm.View) + self.assertEqual(x.user.name, 'PHONE_LIST') # get_grants() self.assertListEqual(sm.get_grants(p.privileges), - ['GRANT REFERENCES(EMP_NO) ON PHONE_LIST TO PUBLIC', - 'GRANT DELETE, INSERT, REFERENCES, SELECT, UPDATE ON PHONE_LIST TO PUBLIC WITH GRANT 
OPTION', - 'GRANT DELETE, INSERT, REFERENCES, SELECT, UPDATE ON PHONE_LIST TO SYSDBA WITH GRANT OPTION']) + ['GRANT REFERENCES(EMP_NO) ON PHONE_LIST TO PUBLIC', + 'GRANT DELETE, INSERT, REFERENCES, SELECT, UPDATE ON PHONE_LIST TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE, INSERT, REFERENCES, SELECT, UPDATE ON PHONE_LIST TO SYSDBA WITH GRANT OPTION']) p = self.con.schema.get_table('COUNTRY') self.assertListEqual(sm.get_grants(p.privileges), - ['GRANT DELETE, INSERT, UPDATE ON COUNTRY TO PUBLIC', - 'GRANT REFERENCES, SELECT ON COUNTRY TO PUBLIC WITH GRANT OPTION', - 'GRANT DELETE, INSERT, REFERENCES, SELECT, UPDATE ON COUNTRY TO SYSDBA WITH GRANT OPTION', - 'GRANT DELETE, INSERT, REFERENCES(COUNTRY,CURRENCY), SELECT, UPDATE(COUNTRY,CURRENCY) ON COUNTRY TO T_USER']) + ['GRANT DELETE, INSERT, UPDATE ON COUNTRY TO PUBLIC', + 'GRANT REFERENCES, SELECT ON COUNTRY TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE, INSERT, REFERENCES, SELECT, UPDATE ON COUNTRY TO SYSDBA WITH GRANT OPTION', + 'GRANT DELETE, INSERT, REFERENCES(COUNTRY,CURRENCY), SELECT, UPDATE(COUNTRY,CURRENCY) ON COUNTRY TO T_USER']) p = self.con.schema.get_role('TEST_ROLE') - self.assertListEqual(sm.get_grants(p.privileges),['GRANT EXECUTE ON PROCEDURE ALL_LANGS TO TEST_ROLE WITH GRANT OPTION']) + self.assertListEqual(sm.get_grants(p.privileges), ['GRANT EXECUTE ON PROCEDURE ALL_LANGS TO TEST_ROLE WITH GRANT OPTION']) p = self.con.schema.get_table('SALARY_HISTORY') self.assertListEqual(sm.get_grants(p.privileges), - ['GRANT INSERT ON SALARY_HISTORY TO TRIGGER SAVE_SALARY_CHANGE']) + ['GRANT INSERT ON SALARY_HISTORY TO TRIGGER SAVE_SALARY_CHANGE']) p = self.con.schema.get_procedure('ORG_CHART') self.assertListEqual(sm.get_grants(p.privileges), - ['GRANT EXECUTE ON PROCEDURE ORG_CHART TO PUBLIC WITH GRANT OPTION', - 'GRANT EXECUTE ON PROCEDURE ORG_CHART TO SYSDBA']) + ['GRANT EXECUTE ON PROCEDURE ORG_CHART TO PUBLIC WITH GRANT OPTION', + 'GRANT EXECUTE ON PROCEDURE ORG_CHART TO SYSDBA']) # def testPackage(self): if self.con.ods < fdb.ODS_FB_30: return c = self.con.schema.get_package('TEST') # common properties - self.assertEqual(c.name,'TEST') + self.assertEqual(c.name, 'TEST') self.assertIsNone(c.description) self.assertFalse(c.issystemobject()) self.assertListEqual(c.actions, - ['create', 'recreate', 'create_or_alter', 'alter', 'drop']) - self.assertEqual(c.get_quoted_name(),'TEST') - self.assertEqual(c.owner_name,'SYSDBA') - self.assertEqual(c.security_class,'SQL$575') - self.assertEqual(c.header,"""BEGIN + ['create', 'recreate', 'create_or_alter', 'alter', 'drop']) + self.assertEqual(c.get_quoted_name(), 'TEST') + self.assertEqual(c.owner_name, 'SYSDBA') + self.assertEqual(c.security_class, 'SQL$575') + self.assertEqual(c.header, """BEGIN PROCEDURE P1(I INT) RETURNS (O INT); -- public procedure FUNCTION F(X INT) RETURNS INT; END""") - self.assertEqual(c.body,"""BEGIN + self.assertEqual(c.body, """BEGIN FUNCTION F1(I INT) RETURNS INT; -- private function PROCEDURE P1(I INT) RETURNS (O INT) @@ -5195,18 +5358,18 @@ RETURN X+1; END END""") - self.assertListEqual(c.get_dependents(),[]) - self.assertEqual(len(c.get_dependencies()),1) + self.assertListEqual(c.get_dependents(), []) + self.assertEqual(len(c.get_dependencies()), 1) self.assertEqual(len(c.functions), 2) self.assertEqual(len(c.procedures), 1) # - self.assertEqual(c.get_sql_for('create'),"""CREATE PACKAGE TEST + self.assertEqual(c.get_sql_for('create'), """CREATE PACKAGE TEST AS BEGIN PROCEDURE P1(I INT) RETURNS (O INT); -- public procedure FUNCTION F(X INT) RETURNS INT; END""") 
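Aside from the whitespace cleanup, the testPackage hunk around this point shows the schema-DDL API these tests exercise: Package metadata objects (Firebird 3+ only, per the `if self.con.ods < fdb.ODS_FB_30: return` guard above) render their own CREATE/ALTER/DROP statements via get_sql_for(), and — as the grant/revoke hunks earlier show — unsupported keyword arguments raise fdb.ProgrammingError. A minimal sketch of that usage, assuming a reachable employee test database and SYSDBA credentials (the connection details are illustrative, not taken from the patch):

    import fdb

    # Connect and fetch the package metadata object (requires Firebird 3+,
    # i.e. ODS >= fdb.ODS_FB_30, as the test guard checks).
    con = fdb.connect(dsn='localhost:employee', user='SYSDBA', password='masterkey')
    pkg = con.schema.get_package('TEST')

    # Each action keyword mirrors an assertion in this hunk.
    print(pkg.get_sql_for('create'))             # CREATE PACKAGE TEST ... (header only)
    print(pkg.get_sql_for('create', body=True))  # CREATE PACKAGE BODY TEST ...
    print(pkg.get_sql_for('alter', header='FUNCTION F2(I INT) RETURNS INT;'))
    print(pkg.get_sql_for('drop'))               # DROP PACKAGE TEST
    print(pkg.get_sql_for('drop', body=True))    # DROP PACKAGE BODY TEST

    con.close()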
- self.assertEqual(c.get_sql_for('create',body=True),"""CREATE PACKAGE BODY TEST + self.assertEqual(c.get_sql_for('create', body=True), """CREATE PACKAGE BODY TEST AS BEGIN FUNCTION F1(I INT) RETURNS INT; -- private function @@ -5228,15 +5391,15 @@ RETURN X+1; END END""") - self.assertEqual(c.get_sql_for('alter',header="FUNCTION F2(I INT) RETURNS INT;"), -"""ALTER PACKAGE TEST + self.assertEqual(c.get_sql_for('alter', header="FUNCTION F2(I INT) RETURNS INT;"), + """ALTER PACKAGE TEST AS BEGIN FUNCTION F2(I INT) RETURNS INT; END""") - self.assertEqual(c.get_sql_for('drop'),"""DROP PACKAGE TEST""") - self.assertEqual(c.get_sql_for('drop',body=True),"""DROP PACKAGE BODY TEST""") - self.assertEqual( c.get_sql_for('create_or_alter'),"""CREATE OR ALTER PACKAGE TEST + self.assertEqual(c.get_sql_for('drop'), """DROP PACKAGE TEST""") + self.assertEqual(c.get_sql_for('drop', body=True), """DROP PACKAGE BODY TEST""") + self.assertEqual(c.get_sql_for('create_or_alter'), """CREATE OR ALTER PACKAGE TEST AS BEGIN PROCEDURE P1(I INT) RETURNS (O INT); -- public procedure @@ -5244,11 +5407,11 @@ END""") # def testVisitor(self): - v = SchemaVisitor(self,'create',follow='dependencies') + v = SchemaVisitor(self, 'create', follow='dependencies') c = self.con.schema.get_procedure('ALL_LANGS') - c.accept_visitor(v) + c.accept(v) self.maxDiff = None - output = "CREATE TABLE JOB\n(\n JOB_CODE JOBCODE NOT NULL,\n" \ + output = "CREATE TABLE JOB (\n JOB_CODE JOBCODE NOT NULL,\n" \ " JOB_GRADE JOBGRADE NOT NULL,\n" \ " JOB_COUNTRY COUNTRYNAME NOT NULL,\n" \ " JOB_TITLE VARCHAR(25) NOT NULL,\n" \ @@ -5303,26 +5466,395 @@ "\t SUSPEND;\n" \ "\tEND\n" \ " END\n" - self.assertMultiLineEqual(self.output.getvalue(),output) + self.assertMultiLineEqual(self.output.getvalue(), output) - v = SchemaVisitor(self,'drop',follow='dependents') + v = SchemaVisitor(self, 'drop', follow='dependents') c = self.con.schema.get_table('JOB') self.clear_output() - c.accept_visitor(v) - self.assertEqual(self.output.getvalue(),"""DROP PROCEDURE ALL_LANGS + c.accept(v) + self.assertEqual(self.output.getvalue(), """DROP PROCEDURE ALL_LANGS DROP PROCEDURE SHOW_LANGS DROP TABLE JOB """) + def testScript(self): + self.maxDiff = None + self.assertEqual(25, len(sm.SCRIPT_DEFAULT_ORDER)) + s = self.con.schema + script = s.get_metadata_ddl([sm.SCRIPT_COLLATIONS]) + self.assertListEqual(script, ["""CREATE COLLATION TEST_COLLATE + FOR WIN1250 + FROM WIN_CZ + NO PAD + CASE INSENSITIVE + ACCENT INSENSITIVE + 'DISABLE-COMPRESSIONS=0;DISABLE-EXPANSIONS=0'"""]) + script = s.get_metadata_ddl([sm.SCRIPT_CHARACTER_SETS]) + self.assertListEqual(script, []) + script = s.get_metadata_ddl([sm.SCRIPT_UDFS]) + self.assertListEqual(script, []) + script = s.get_metadata_ddl([sm.SCRIPT_GENERATORS]) + self.assertListEqual(script, ['CREATE SEQUENCE EMP_NO_GEN', + 'CREATE SEQUENCE CUST_NO_GEN']) + script = s.get_metadata_ddl([sm.SCRIPT_EXCEPTIONS]) + self.assertListEqual(script, ["CREATE EXCEPTION UNKNOWN_EMP_ID 'Invalid employee number or project id.'", + "CREATE EXCEPTION REASSIGN_SALES 'Reassign the sales records before deleting this employee.'", + 'CREATE EXCEPTION ORDER_ALREADY_SHIPPED \'Order status is "shipped."\'', + "CREATE EXCEPTION CUSTOMER_ON_HOLD 'This customer is on hold.'", + "CREATE EXCEPTION CUSTOMER_CHECK 'Overdue balance -- can not ship.'"]) + script = s.get_metadata_ddl([sm.SCRIPT_DOMAINS]) + self.assertListEqual(script, ['CREATE DOMAIN "FIRSTNAME" AS VARCHAR(15)', + 'CREATE DOMAIN "LASTNAME" AS VARCHAR(20)', + 'CREATE DOMAIN PHONENUMBER AS VARCHAR(20)', + 
'CREATE DOMAIN COUNTRYNAME AS VARCHAR(15)', + 'CREATE DOMAIN ADDRESSLINE AS VARCHAR(30)', + 'CREATE DOMAIN EMPNO AS SMALLINT', + "CREATE DOMAIN DEPTNO AS CHAR(3) CHECK (VALUE = '000' OR (VALUE > '0' AND VALUE <= '999') OR VALUE IS NULL)", + 'CREATE DOMAIN PROJNO AS CHAR(5) CHECK (VALUE = UPPER (VALUE))', + 'CREATE DOMAIN CUSTNO AS INTEGER CHECK (VALUE > 1000)', + "CREATE DOMAIN JOBCODE AS VARCHAR(5) CHECK (VALUE > '99999')", + 'CREATE DOMAIN JOBGRADE AS SMALLINT CHECK (VALUE BETWEEN 0 AND 6)', + 'CREATE DOMAIN SALARY AS NUMERIC(10, 2) DEFAULT 0 CHECK (VALUE > 0)', + 'CREATE DOMAIN BUDGET AS DECIMAL(12, 2) DEFAULT 50000 CHECK (VALUE > 10000 AND VALUE <= 2000000)', + "CREATE DOMAIN PRODTYPE AS VARCHAR(12) DEFAULT 'software' NOT NULL CHECK (VALUE IN ('software', 'hardware', 'other', 'N/A'))", + "CREATE DOMAIN PONUMBER AS CHAR(8) CHECK (VALUE STARTING WITH 'V')"]) + if self.version == FB30: + script = s.get_metadata_ddl([sm.SCRIPT_PACKAGE_DEFS]) + self.assertListEqual(script, ['CREATE PACKAGE TEST\nAS\nBEGIN\n PROCEDURE P1(I INT) RETURNS (O INT); -- public procedure\n FUNCTION F(X INT) RETURNS INT;\nEND', + 'CREATE PACKAGE TEST2\nAS\nBEGIN\n FUNCTION F3(X INT) RETURNS INT;\nEND']) + if self.version == FB30: + script = s.get_metadata_ddl([sm.SCRIPT_FUNCTION_DEFS]) + self.assertListEqual(script, ['CREATE FUNCTION F2 (X INTEGER)\nRETURNS INTEGER\nAS\nBEGIN\nEND', + 'CREATE FUNCTION FX (\n F TYPE OF "FIRSTNAME",\n L TYPE OF COLUMN CUSTOMER.CONTACT_LAST\n)\nRETURNS VARCHAR(35)\nAS\nBEGIN\nEND', + 'CREATE FUNCTION FN\nRETURNS INTEGER\nAS\nBEGIN\nEND']) + script = s.get_metadata_ddl([sm.SCRIPT_PROCEDURE_DEFS]) + if self.version == FB30: + self.assertListEqual(script, ['CREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT)\nRETURNS (PROJ_ID CHAR(5))\nAS\nBEGIN\n SUSPEND;\nEND', + 'CREATE PROCEDURE ADD_EMP_PROJ (\n EMP_NO SMALLINT,\n PROJ_ID CHAR(5)\n)\nAS\nBEGIN\n SUSPEND;\nEND', + 'CREATE PROCEDURE SUB_TOT_BUDGET (HEAD_DEPT CHAR(3))\nRETURNS (\n TOT_BUDGET DECIMAL(12, 2),\n AVG_BUDGET DECIMAL(12, 2),\n MIN_BUDGET DECIMAL(12, 2),\n MAX_BUDGET DECIMAL(12, 2)\n)\nAS\nBEGIN\n SUSPEND;\nEND', + 'CREATE PROCEDURE DELETE_EMPLOYEE (EMP_NUM INTEGER)\nAS\nBEGIN\n SUSPEND;\nEND', + 'CREATE PROCEDURE DEPT_BUDGET (DNO CHAR(3))\nRETURNS (TOT DECIMAL(12, 2))\nAS\nBEGIN\n SUSPEND;\nEND', + 'CREATE PROCEDURE ORG_CHART\nRETURNS (\n HEAD_DEPT CHAR(25),\n DEPARTMENT CHAR(25),\n MNGR_NAME CHAR(20),\n TITLE CHAR(5),\n EMP_CNT INTEGER\n)\nAS\nBEGIN\n SUSPEND;\nEND', + 'CREATE PROCEDURE MAIL_LABEL (CUST_NO INTEGER)\nRETURNS (\n LINE1 CHAR(40),\n LINE2 CHAR(40),\n LINE3 CHAR(40),\n LINE4 CHAR(40),\n LINE5 CHAR(40),\n LINE6 CHAR(40)\n)\nAS\nBEGIN\n SUSPEND;\nEND', + 'CREATE PROCEDURE SHIP_ORDER (PO_NUM CHAR(8))\nAS\nBEGIN\n SUSPEND;\nEND', + 'CREATE PROCEDURE SHOW_LANGS (\n CODE VARCHAR(5),\n GRADE SMALLINT,\n CTY VARCHAR(15)\n)\nRETURNS (LANGUAGES VARCHAR(15))\nAS\nBEGIN\n SUSPEND;\nEND', + 'CREATE PROCEDURE ALL_LANGS\nRETURNS (\n CODE VARCHAR(5),\n GRADE VARCHAR(5),\n COUNTRY VARCHAR(15),\n LANG VARCHAR(15)\n)\nAS\nBEGIN\n SUSPEND;\nEND']) + else: + self.assertListEqual(script, ['CREATE PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT)\nRETURNS (PROJ_ID CHAR(5))\nAS\nBEGIN\nEND', + 'CREATE PROCEDURE ADD_EMP_PROJ (\n EMP_NO SMALLINT,\n PROJ_ID CHAR(5)\n)\nAS\nBEGIN\nEND', + 'CREATE PROCEDURE SUB_TOT_BUDGET (HEAD_DEPT CHAR(3))\nRETURNS (\n TOT_BUDGET DECIMAL(12, 2),\n AVG_BUDGET DECIMAL(12, 2),\n MIN_BUDGET DECIMAL(12, 2),\n MAX_BUDGET DECIMAL(12, 2)\n)\nAS\nBEGIN\nEND', + 'CREATE PROCEDURE DELETE_EMPLOYEE (EMP_NUM 
INTEGER)\nAS\nBEGIN\nEND', + 'CREATE PROCEDURE DEPT_BUDGET (DNO CHAR(3))\nRETURNS (TOT DECIMAL(12, 2))\nAS\nBEGIN\nEND', + 'CREATE PROCEDURE ORG_CHART\nRETURNS (\n HEAD_DEPT CHAR(25),\n DEPARTMENT CHAR(25),\n MNGR_NAME CHAR(20),\n TITLE CHAR(5),\n EMP_CNT INTEGER\n)\nAS\nBEGIN\nEND', + 'CREATE PROCEDURE MAIL_LABEL (CUST_NO INTEGER)\nRETURNS (\n LINE1 CHAR(40),\n LINE2 CHAR(40),\n LINE3 CHAR(40),\n LINE4 CHAR(40),\n LINE5 CHAR(40),\n LINE6 CHAR(40)\n)\nAS\nBEGIN\nEND', + 'CREATE PROCEDURE SHIP_ORDER (PO_NUM CHAR(8))\nAS\nBEGIN\nEND', + 'CREATE PROCEDURE SHOW_LANGS (\n CODE VARCHAR(5),\n GRADE SMALLINT,\n CTY VARCHAR(15)\n)\nRETURNS (LANGUAGES VARCHAR(15))\nAS\nBEGIN\nEND', + 'CREATE PROCEDURE ALL_LANGS\nRETURNS (\n CODE VARCHAR(5),\n GRADE VARCHAR(5),\n COUNTRY VARCHAR(15),\n LANG VARCHAR(15)\n)\nAS\nBEGIN\nEND']) + script = s.get_metadata_ddl([sm.SCRIPT_TABLES]) + if self.version == FB30: + self.assertListEqual(script, ['CREATE TABLE COUNTRY (\n COUNTRY COUNTRYNAME NOT NULL,\n CURRENCY VARCHAR(10) NOT NULL\n)', + 'CREATE TABLE JOB (\n JOB_CODE JOBCODE NOT NULL,\n JOB_GRADE JOBGRADE NOT NULL,\n JOB_COUNTRY COUNTRYNAME NOT NULL,\n JOB_TITLE VARCHAR(25) NOT NULL,\n MIN_SALARY SALARY NOT NULL,\n MAX_SALARY SALARY NOT NULL,\n JOB_REQUIREMENT BLOB SUB_TYPE TEXT SEGMENT SIZE 400,\n LANGUAGE_REQ VARCHAR(15)[5]\n)', + "CREATE TABLE DEPARTMENT (\n DEPT_NO DEPTNO NOT NULL,\n DEPARTMENT VARCHAR(25) NOT NULL,\n HEAD_DEPT DEPTNO,\n MNGR_NO EMPNO,\n BUDGET BUDGET,\n LOCATION VARCHAR(15),\n PHONE_NO PHONENUMBER DEFAULT '555-1234'\n)", + 'CREATE TABLE EMPLOYEE (\n EMP_NO EMPNO NOT NULL,\n FIRST_NAME "FIRSTNAME" NOT NULL,\n LAST_NAME "LASTNAME" NOT NULL,\n PHONE_EXT VARCHAR(4),\n HIRE_DATE TIMESTAMP DEFAULT \'NOW\' NOT NULL,\n DEPT_NO DEPTNO NOT NULL,\n JOB_CODE JOBCODE NOT NULL,\n JOB_GRADE JOBGRADE NOT NULL,\n JOB_COUNTRY COUNTRYNAME NOT NULL,\n SALARY SALARY NOT NULL,\n FULL_NAME COMPUTED BY (last_name || \', \' || first_name)\n)', + 'CREATE TABLE CUSTOMER (\n CUST_NO CUSTNO NOT NULL,\n CUSTOMER VARCHAR(25) NOT NULL,\n CONTACT_FIRST "FIRSTNAME",\n CONTACT_LAST "LASTNAME",\n PHONE_NO PHONENUMBER,\n ADDRESS_LINE1 ADDRESSLINE,\n ADDRESS_LINE2 ADDRESSLINE,\n CITY VARCHAR(25),\n STATE_PROVINCE VARCHAR(15),\n COUNTRY COUNTRYNAME,\n POSTAL_CODE VARCHAR(12),\n ON_HOLD CHAR(1) DEFAULT NULL\n)', + 'CREATE TABLE PROJECT (\n PROJ_ID PROJNO NOT NULL,\n PROJ_NAME VARCHAR(20) NOT NULL,\n PROJ_DESC BLOB SUB_TYPE TEXT SEGMENT SIZE 800,\n TEAM_LEADER EMPNO,\n PRODUCT PRODTYPE\n)', + 'CREATE TABLE EMPLOYEE_PROJECT (\n EMP_NO EMPNO NOT NULL,\n PROJ_ID PROJNO NOT NULL\n)', + 'CREATE TABLE PROJ_DEPT_BUDGET (\n FISCAL_YEAR INTEGER NOT NULL,\n PROJ_ID PROJNO NOT NULL,\n DEPT_NO DEPTNO NOT NULL,\n QUART_HEAD_CNT INTEGER[4],\n PROJECTED_BUDGET BUDGET\n)', + "CREATE TABLE SALARY_HISTORY (\n EMP_NO EMPNO NOT NULL,\n CHANGE_DATE TIMESTAMP DEFAULT 'NOW' NOT NULL,\n UPDATER_ID VARCHAR(20) NOT NULL,\n OLD_SALARY SALARY NOT NULL,\n PERCENT_CHANGE DOUBLE PRECISION DEFAULT 0 NOT NULL,\n NEW_SALARY COMPUTED BY (old_salary + old_salary * percent_change / 100)\n)", + "CREATE TABLE SALES (\n PO_NUMBER PONUMBER NOT NULL,\n CUST_NO CUSTNO NOT NULL,\n SALES_REP EMPNO,\n ORDER_STATUS VARCHAR(7) DEFAULT 'new' NOT NULL,\n ORDER_DATE TIMESTAMP DEFAULT 'NOW' NOT NULL,\n SHIP_DATE TIMESTAMP,\n DATE_NEEDED TIMESTAMP,\n PAID CHAR(1) DEFAULT 'n',\n QTY_ORDERED INTEGER DEFAULT 1 NOT NULL,\n TOTAL_VALUE DECIMAL(9, 2) NOT NULL,\n DISCOUNT FLOAT DEFAULT 0 NOT NULL,\n ITEM_TYPE PRODTYPE,\n AGED COMPUTED BY (ship_date - order_date)\n)", + 'CREATE TABLE AR (\n 
C1 INTEGER,\n C2 INTEGER[4, 0:3, 2],\n C3 VARCHAR(15)[0:5, 2],\n C4 CHAR(5)[5],\n C5 TIMESTAMP[2],\n C6 TIME[2],\n C7 DECIMAL(10, 2)[2],\n C8 NUMERIC(10, 2)[2],\n C9 SMALLINT[2],\n C10 BIGINT[2],\n C11 FLOAT[2],\n C12 DOUBLE PRECISION[2],\n C13 DECIMAL(10, 1)[2],\n C14 DECIMAL(10, 5)[2],\n C15 DECIMAL(18, 5)[2],\n C16 BOOLEAN[3]\n)', + 'CREATE TABLE T2 (\n C1 SMALLINT,\n C2 INTEGER,\n C3 BIGINT,\n C4 CHAR(5),\n C5 VARCHAR(10),\n C6 DATE,\n C7 TIME,\n C8 TIMESTAMP,\n C9 BLOB SUB_TYPE TEXT SEGMENT SIZE 80,\n C10 NUMERIC(18, 2),\n C11 DECIMAL(18, 2),\n C12 FLOAT,\n C13 DOUBLE PRECISION,\n C14 NUMERIC(8, 4),\n C15 DECIMAL(8, 4),\n C16 BLOB SUB_TYPE BINARY SEGMENT SIZE 80,\n C17 BOOLEAN\n)', + 'CREATE TABLE T3 (\n C1 INTEGER,\n C2 CHAR(10) CHARACTER SET UTF8,\n C3 VARCHAR(10) CHARACTER SET UTF8,\n C4 BLOB SUB_TYPE TEXT SEGMENT SIZE 80 CHARACTER SET UTF8,\n C5 BLOB SUB_TYPE BINARY SEGMENT SIZE 80\n)', + 'CREATE TABLE T4 (\n C1 INTEGER,\n C_OCTETS CHAR(5) CHARACTER SET OCTETS,\n V_OCTETS VARCHAR(30) CHARACTER SET OCTETS,\n C_NONE CHAR(5),\n V_NONE VARCHAR(30),\n C_WIN1250 CHAR(5) CHARACTER SET WIN1250,\n V_WIN1250 VARCHAR(30) CHARACTER SET WIN1250,\n C_UTF8 CHAR(5) CHARACTER SET UTF8,\n V_UTF8 VARCHAR(30) CHARACTER SET UTF8\n)', + 'CREATE TABLE T5 (\n ID NUMERIC(10, 0) GENERATED BY DEFAULT AS IDENTITY,\n C1 VARCHAR(15),\n UQ BIGINT GENERATED BY DEFAULT AS IDENTITY (START WITH 100)\n)', 'CREATE TABLE T (\n C1 INTEGER NOT NULL\n)']) + else: + self.assertListEqual(script, ['CREATE TABLE COUNTRY (\n COUNTRY COUNTRYNAME NOT NULL,\n CURRENCY VARCHAR(10) NOT NULL\n)', + 'CREATE TABLE JOB (\n JOB_CODE JOBCODE NOT NULL,\n JOB_GRADE JOBGRADE NOT NULL,\n JOB_COUNTRY COUNTRYNAME NOT NULL,\n JOB_TITLE VARCHAR(25) NOT NULL,\n MIN_SALARY SALARY NOT NULL,\n MAX_SALARY SALARY NOT NULL,\n JOB_REQUIREMENT BLOB SUB_TYPE TEXT SEGMENT SIZE 400,\n LANGUAGE_REQ VARCHAR(15)[5]\n)', + "CREATE TABLE DEPARTMENT (\n DEPT_NO DEPTNO NOT NULL,\n DEPARTMENT VARCHAR(25) NOT NULL,\n HEAD_DEPT DEPTNO,\n MNGR_NO EMPNO,\n BUDGET BUDGET,\n LOCATION VARCHAR(15),\n PHONE_NO PHONENUMBER DEFAULT '555-1234'\n)", + 'CREATE TABLE EMPLOYEE (\n EMP_NO EMPNO NOT NULL,\n FIRST_NAME "FIRSTNAME" NOT NULL,\n LAST_NAME "LASTNAME" NOT NULL,\n PHONE_EXT VARCHAR(4),\n HIRE_DATE TIMESTAMP DEFAULT \'NOW\' NOT NULL,\n DEPT_NO DEPTNO NOT NULL,\n JOB_CODE JOBCODE NOT NULL,\n JOB_GRADE JOBGRADE NOT NULL,\n JOB_COUNTRY COUNTRYNAME NOT NULL,\n SALARY SALARY NOT NULL,\n FULL_NAME COMPUTED BY (last_name || \', \' || first_name)\n)', + 'CREATE TABLE CUSTOMER (\n CUST_NO CUSTNO NOT NULL,\n CUSTOMER VARCHAR(25) NOT NULL,\n CONTACT_FIRST "FIRSTNAME",\n CONTACT_LAST "LASTNAME",\n PHONE_NO PHONENUMBER,\n ADDRESS_LINE1 ADDRESSLINE,\n ADDRESS_LINE2 ADDRESSLINE,\n CITY VARCHAR(25),\n STATE_PROVINCE VARCHAR(15),\n COUNTRY COUNTRYNAME,\n POSTAL_CODE VARCHAR(12),\n ON_HOLD CHAR(1) DEFAULT NULL\n)', + 'CREATE TABLE T4 (\n C1 INTEGER,\n C_OCTETS CHAR(5) CHARACTER SET OCTETS,\n V_OCTETS VARCHAR(30) CHARACTER SET OCTETS,\n C_NONE CHAR(5),\n V_NONE VARCHAR(30),\n C_WIN1250 CHAR(5) CHARACTER SET WIN1250,\n V_WIN1250 VARCHAR(30) CHARACTER SET WIN1250,\n C_UTF8 CHAR(5) CHARACTER SET UTF8,\n V_UTF8 VARCHAR(30) CHARACTER SET UTF8\n)', + 'CREATE TABLE PROJECT (\n PROJ_ID PROJNO NOT NULL,\n PROJ_NAME VARCHAR(20) NOT NULL,\n PROJ_DESC BLOB SUB_TYPE TEXT SEGMENT SIZE 800,\n TEAM_LEADER EMPNO,\n PRODUCT PRODTYPE\n)', + 'CREATE TABLE EMPLOYEE_PROJECT (\n EMP_NO EMPNO NOT NULL,\n PROJ_ID PROJNO NOT NULL\n)', + 'CREATE TABLE PROJ_DEPT_BUDGET (\n FISCAL_YEAR INTEGER NOT NULL,\n PROJ_ID PROJNO 
NOT NULL,\n DEPT_NO DEPTNO NOT NULL,\n QUART_HEAD_CNT INTEGER[4],\n PROJECTED_BUDGET BUDGET\n)', + "CREATE TABLE SALARY_HISTORY (\n EMP_NO EMPNO NOT NULL,\n CHANGE_DATE TIMESTAMP DEFAULT 'NOW' NOT NULL,\n UPDATER_ID VARCHAR(20) NOT NULL,\n OLD_SALARY SALARY NOT NULL,\n PERCENT_CHANGE DOUBLE PRECISION DEFAULT 0 NOT NULL,\n NEW_SALARY COMPUTED BY (old_salary + old_salary * percent_change / 100)\n)", + "CREATE TABLE SALES (\n PO_NUMBER PONUMBER NOT NULL,\n CUST_NO CUSTNO NOT NULL,\n SALES_REP EMPNO,\n ORDER_STATUS VARCHAR(7) DEFAULT 'new' NOT NULL,\n ORDER_DATE TIMESTAMP DEFAULT 'NOW' NOT NULL,\n SHIP_DATE TIMESTAMP,\n DATE_NEEDED TIMESTAMP,\n PAID CHAR(1) DEFAULT 'n',\n QTY_ORDERED INTEGER DEFAULT 1 NOT NULL,\n TOTAL_VALUE DECIMAL(9, 2) NOT NULL,\n DISCOUNT FLOAT DEFAULT 0 NOT NULL,\n ITEM_TYPE PRODTYPE,\n AGED COMPUTED BY (ship_date - order_date)\n)", + 'CREATE TABLE T3 (\n C1 INTEGER,\n C2 CHAR(10) CHARACTER SET UTF8,\n C3 VARCHAR(10) CHARACTER SET UTF8,\n C4 BLOB SUB_TYPE TEXT SEGMENT SIZE 80 CHARACTER SET UTF8,\n C5 BLOB SUB_TYPE BINARY SEGMENT SIZE 80\n)', + 'CREATE TABLE T2 (\n C1 SMALLINT,\n C2 INTEGER,\n C3 BIGINT,\n C4 CHAR(5),\n C5 VARCHAR(10),\n C6 DATE,\n C7 TIME,\n C8 TIMESTAMP,\n C9 BLOB SUB_TYPE TEXT SEGMENT SIZE 80,\n C10 NUMERIC(18, 2),\n C11 DECIMAL(18, 2),\n C12 FLOAT,\n C13 DOUBLE PRECISION,\n C14 NUMERIC(8, 4),\n C15 DECIMAL(8, 4),\n C16 BLOB SUB_TYPE BINARY SEGMENT SIZE 80\n)', + 'CREATE TABLE AR (\n C1 INTEGER,\n C2 INTEGER[4, 0:3, 2],\n C3 VARCHAR(15)[0:5, 2],\n C4 CHAR(5)[5],\n C5 TIMESTAMP[2],\n C6 TIME[2],\n C7 DECIMAL(10, 2)[2],\n C8 NUMERIC(10, 2)[2],\n C9 SMALLINT[2],\n C10 BIGINT[2],\n C11 FLOAT[2],\n C12 DOUBLE PRECISION[2],\n C13 DECIMAL(10, 1)[2],\n C14 DECIMAL(10, 5)[2],\n C15 DECIMAL(18, 5)[2]\n)', + 'CREATE TABLE T (\n C1 INTEGER NOT NULL\n)']) + script = s.get_metadata_ddl([sm.SCRIPT_PRIMARY_KEYS]) + if self.version == FB30: + self.assertListEqual(script, ['ALTER TABLE COUNTRY ADD PRIMARY KEY (COUNTRY)', + 'ALTER TABLE JOB ADD PRIMARY KEY (JOB_CODE,JOB_GRADE,JOB_COUNTRY)', + 'ALTER TABLE DEPARTMENT ADD PRIMARY KEY (DEPT_NO)', + 'ALTER TABLE EMPLOYEE ADD PRIMARY KEY (EMP_NO)', + 'ALTER TABLE PROJECT ADD PRIMARY KEY (PROJ_ID)', + 'ALTER TABLE EMPLOYEE_PROJECT ADD PRIMARY KEY (EMP_NO,PROJ_ID)', + 'ALTER TABLE PROJ_DEPT_BUDGET ADD PRIMARY KEY (FISCAL_YEAR,PROJ_ID,DEPT_NO)', + 'ALTER TABLE SALARY_HISTORY ADD PRIMARY KEY (EMP_NO,CHANGE_DATE,UPDATER_ID)', + 'ALTER TABLE CUSTOMER ADD PRIMARY KEY (CUST_NO)', + 'ALTER TABLE SALES ADD PRIMARY KEY (PO_NUMBER)', + 'ALTER TABLE T5 ADD PRIMARY KEY (ID)', + 'ALTER TABLE T ADD PRIMARY KEY (C1)'],) + else: + self.assertListEqual(script, ['ALTER TABLE COUNTRY ADD PRIMARY KEY (COUNTRY)', + 'ALTER TABLE JOB ADD PRIMARY KEY (JOB_CODE,JOB_GRADE,JOB_COUNTRY)', + 'ALTER TABLE DEPARTMENT ADD PRIMARY KEY (DEPT_NO)', + 'ALTER TABLE EMPLOYEE ADD PRIMARY KEY (EMP_NO)', + 'ALTER TABLE PROJECT ADD PRIMARY KEY (PROJ_ID)', + 'ALTER TABLE EMPLOYEE_PROJECT ADD PRIMARY KEY (EMP_NO,PROJ_ID)', + 'ALTER TABLE PROJ_DEPT_BUDGET ADD PRIMARY KEY (FISCAL_YEAR,PROJ_ID,DEPT_NO)', + 'ALTER TABLE SALARY_HISTORY ADD PRIMARY KEY (EMP_NO,CHANGE_DATE,UPDATER_ID)', + 'ALTER TABLE CUSTOMER ADD PRIMARY KEY (CUST_NO)', + 'ALTER TABLE SALES ADD PRIMARY KEY (PO_NUMBER)', + 'ALTER TABLE T ADD PRIMARY KEY (C1)'],) + script = s.get_metadata_ddl([sm.SCRIPT_UNIQUE_CONSTRAINTS]) + self.assertListEqual(script, ['ALTER TABLE DEPARTMENT ADD UNIQUE (DEPARTMENT)', + 'ALTER TABLE PROJECT ADD UNIQUE (PROJ_NAME)']) + script = s.get_metadata_ddl([sm.SCRIPT_CHECK_CONSTRAINTS]) 
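For orientation, every assertion in these hunks drives the same call: Schema.get_metadata_ddl() takes a list of SCRIPT_* section constants and returns the matching DDL statements as a list of strings. A minimal sketch of that usage, assuming a reachable Firebird server (the dsn, user and password below are placeholders):

import fdb
import fdb.schema as sm

# Connect and take the bound schema object from the connection; the tests
# above exercise the same object against their test database.
con = fdb.connect(dsn='employee', user='SYSDBA', password='masterkey')
s = con.schema
# Each SCRIPT_* constant selects one section of the extraction script; the
# result is a plain list of DDL strings, one statement per entry.
for ddl in s.get_metadata_ddl([sm.SCRIPT_TABLES, sm.SCRIPT_PRIMARY_KEYS]):
    print(ddl + ';')
con.close()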
+ #self.assertListEqual(script, ['ALTER TABLE JOB ADD CHECK (min_salary < max_salary)', + #'ALTER TABLE EMPLOYEE ADD CHECK ( salary >= (SELECT min_salary FROM job WHERE\n job.job_code = employee.job_code AND\n job.job_grade = employee.job_grade AND\n job.job_country = employee.job_country) AND\n salary <= (SELECT max_salary FROM job WHERE\n job.job_code = employee.job_code AND\n job.job_grade = employee.job_grade AND\n job.job_country = employee.job_country))', + #"ALTER TABLE CUSTOMER ADD CHECK (on_hold IS NULL OR on_hold = '*')", + #'ALTER TABLE PROJ_DEPT_BUDGET ADD CHECK (FISCAL_YEAR >= 1993)', + #'ALTER TABLE SALARY_HISTORY ADD CHECK (percent_change between -50 and 50)', + #'ALTER TABLE SALES ADD CHECK (total_value >= 0)', + #'ALTER TABLE SALES ADD CHECK (ship_date >= order_date OR ship_date IS NULL)', + #"ALTER TABLE SALES ADD CHECK (NOT (order_status = 'shipped' AND\n EXISTS (SELECT on_hold FROM customer\n WHERE customer.cust_no = sales.cust_no\n AND customer.on_hold = '*')))", + #'ALTER TABLE SALES ADD CHECK (date_needed > order_date OR date_needed IS NULL)', + #"ALTER TABLE SALES ADD CHECK (paid in ('y', 'n'))", + #"ALTER TABLE SALES ADD CHECK (NOT (order_status = 'shipped' AND ship_date IS NULL))", + #"ALTER TABLE SALES ADD CHECK (order_status in\n ('new', 'open', 'shipped', 'waiting'))", + #'ALTER TABLE SALES ADD CHECK (discount >= 0 AND discount <= 1)', + #'ALTER TABLE SALES ADD CHECK (qty_ordered >= 1)']) + script = s.get_metadata_ddl([sm.SCRIPT_FOREIGN_CONSTRAINTS]) + self.assertListEqual(script, ['ALTER TABLE JOB ADD FOREIGN KEY (JOB_COUNTRY)\n REFERENCES COUNTRY (COUNTRY)', + 'ALTER TABLE DEPARTMENT ADD FOREIGN KEY (HEAD_DEPT)\n REFERENCES DEPARTMENT (DEPT_NO)', + 'ALTER TABLE DEPARTMENT ADD FOREIGN KEY (MNGR_NO)\n REFERENCES EMPLOYEE (EMP_NO)', + 'ALTER TABLE EMPLOYEE ADD FOREIGN KEY (DEPT_NO)\n REFERENCES DEPARTMENT (DEPT_NO)', + 'ALTER TABLE EMPLOYEE ADD FOREIGN KEY (JOB_CODE,JOB_GRADE,JOB_COUNTRY)\n REFERENCES JOB (JOB_CODE,JOB_GRADE,JOB_COUNTRY)', + 'ALTER TABLE CUSTOMER ADD FOREIGN KEY (COUNTRY)\n REFERENCES COUNTRY (COUNTRY)', + 'ALTER TABLE PROJECT ADD FOREIGN KEY (TEAM_LEADER)\n REFERENCES EMPLOYEE (EMP_NO)', + 'ALTER TABLE EMPLOYEE_PROJECT ADD FOREIGN KEY (EMP_NO)\n REFERENCES EMPLOYEE (EMP_NO)', + 'ALTER TABLE EMPLOYEE_PROJECT ADD FOREIGN KEY (PROJ_ID)\n REFERENCES PROJECT (PROJ_ID)', + 'ALTER TABLE PROJ_DEPT_BUDGET ADD FOREIGN KEY (DEPT_NO)\n REFERENCES DEPARTMENT (DEPT_NO)', + 'ALTER TABLE PROJ_DEPT_BUDGET ADD FOREIGN KEY (PROJ_ID)\n REFERENCES PROJECT (PROJ_ID)', + 'ALTER TABLE SALARY_HISTORY ADD FOREIGN KEY (EMP_NO)\n REFERENCES EMPLOYEE (EMP_NO)', + 'ALTER TABLE SALES ADD FOREIGN KEY (CUST_NO)\n REFERENCES CUSTOMER (CUST_NO)', + 'ALTER TABLE SALES ADD FOREIGN KEY (SALES_REP)\n REFERENCES EMPLOYEE (EMP_NO)']) + script = s.get_metadata_ddl([sm.SCRIPT_INDICES]) + self.assertListEqual(script, ['CREATE ASCENDING INDEX MINSALX ON JOB (JOB_COUNTRY,MIN_SALARY)', + 'CREATE DESCENDING INDEX MAXSALX ON JOB (JOB_COUNTRY,MAX_SALARY)', + 'CREATE DESCENDING INDEX BUDGETX ON DEPARTMENT (BUDGET)', + 'CREATE ASCENDING INDEX NAMEX ON EMPLOYEE (LAST_NAME,FIRST_NAME)', + 'CREATE ASCENDING INDEX CUSTNAMEX ON CUSTOMER (CUSTOMER)', + 'CREATE ASCENDING INDEX CUSTREGION ON CUSTOMER (COUNTRY,CITY)', + 'CREATE UNIQUE ASCENDING INDEX PRODTYPEX ON PROJECT (PRODUCT,PROJ_NAME)', + 'CREATE ASCENDING INDEX UPDATERX ON SALARY_HISTORY (UPDATER_ID)', + 'CREATE DESCENDING INDEX CHANGEX ON SALARY_HISTORY (CHANGE_DATE)', + 'CREATE ASCENDING INDEX NEEDX ON SALES (DATE_NEEDED)', + 'CREATE ASCENDING 
INDEX SALESTATX ON SALES (ORDER_STATUS,PAID)', + 'CREATE DESCENDING INDEX QTYX ON SALES (ITEM_TYPE,QTY_ORDERED)']) + script = s.get_metadata_ddl([sm.SCRIPT_VIEWS]) + self.assertListEqual(script, ['CREATE VIEW PHONE_LIST (EMP_NO,FIRST_NAME,LAST_NAME,PHONE_EXT,LOCATION,PHONE_NO)\n AS\n SELECT\n emp_no, first_name, last_name, phone_ext, location, phone_no\n FROM employee, department\n WHERE employee.dept_no = department.dept_no']) + if self.version == FB30: + script = s.get_metadata_ddl([sm.SCRIPT_PACKAGE_BODIES]) + self.assertListEqual(script, ['CREATE PACKAGE BODY TEST\nAS\nBEGIN\n FUNCTION F1(I INT) RETURNS INT; -- private function\n\n PROCEDURE P1(I INT) RETURNS (O INT)\n AS\n BEGIN\n END\n\n FUNCTION F1(I INT) RETURNS INT\n AS\n BEGIN\n RETURN F(I)+10;\n END\n\n FUNCTION F(X INT) RETURNS INT\n AS\n BEGIN\n RETURN X+1;\n END\nEND', 'CREATE PACKAGE BODY TEST2\nAS\nBEGIN\n FUNCTION F3(X INT) RETURNS INT\n AS\n BEGIN\n RETURN TEST.F(X)+100+FN();\n END\nEND']) + script = s.get_metadata_ddl([sm.SCRIPT_FUNCTION_BODIES]) + self.assertListEqual(script, ['ALTER FUNCTION F2 (X INTEGER)\nRETURNS INTEGER\nAS\nBEGIN\n RETURN X+1;\nEND', + 'ALTER FUNCTION FX (\n F TYPE OF "FIRSTNAME",\n L TYPE OF COLUMN CUSTOMER.CONTACT_LAST\n)\nRETURNS VARCHAR(35)\nAS\nBEGIN\n RETURN L || \', \' || F;\nEND', + 'ALTER FUNCTION FN\nRETURNS INTEGER\nAS\nBEGIN\n RETURN 0;\nEND']) + script = s.get_metadata_ddl([sm.SCRIPT_PROCEDURE_BODIES]) + self.assertListEqual(script, ['ALTER PROCEDURE GET_EMP_PROJ (EMP_NO SMALLINT)\nRETURNS (PROJ_ID CHAR(5))\nAS\nBEGIN\n\tFOR SELECT proj_id\n\t\tFROM employee_project\n\t\tWHERE emp_no = :emp_no\n\t\tINTO :proj_id\n\tDO\n\t\tSUSPEND;\nEND', 'ALTER PROCEDURE ADD_EMP_PROJ (\n EMP_NO SMALLINT,\n PROJ_ID CHAR(5)\n)\nAS\nBEGIN\n\tBEGIN\n\tINSERT INTO employee_project (emp_no, proj_id) VALUES (:emp_no, :proj_id);\n\tWHEN SQLCODE -530 DO\n\t\tEXCEPTION unknown_emp_id;\n\tEND\n\tSUSPEND;\nEND', + 'ALTER PROCEDURE SUB_TOT_BUDGET (HEAD_DEPT CHAR(3))\nRETURNS (\n TOT_BUDGET DECIMAL(12, 2),\n AVG_BUDGET DECIMAL(12, 2),\n MIN_BUDGET DECIMAL(12, 2),\n MAX_BUDGET DECIMAL(12, 2)\n)\nAS\nBEGIN\n\tSELECT SUM(budget), AVG(budget), MIN(budget), MAX(budget)\n\t\tFROM department\n\t\tWHERE head_dept = :head_dept\n\t\tINTO :tot_budget, :avg_budget, :min_budget, :max_budget;\n\tSUSPEND;\nEND', + "ALTER PROCEDURE DELETE_EMPLOYEE (EMP_NUM INTEGER)\nAS\nDECLARE VARIABLE any_sales INTEGER;\nBEGIN\n\tany_sales = 0;\n\n\t/*\n\t *\tIf there are any sales records referencing this employee,\n\t *\tcan't delete the employee until the sales are re-assigned\n\t *\tto another employee or changed to NULL.\n\t */\n\tSELECT count(po_number)\n\tFROM sales\n\tWHERE sales_rep = :emp_num\n\tINTO :any_sales;\n\n\tIF (any_sales > 0) THEN\n\tBEGIN\n\t\tEXCEPTION reassign_sales;\n\t\tSUSPEND;\n\tEND\n\n\t/*\n\t *\tIf the employee is a manager, update the department.\n\t */\n\tUPDATE department\n\tSET mngr_no = NULL\n\tWHERE mngr_no = :emp_num;\n\n\t/*\n\t *\tIf the employee is a project leader, update project.\n\t */\n\tUPDATE project\n\tSET team_leader = NULL\n\tWHERE team_leader = :emp_num;\n\n\t/*\n\t *\tDelete the employee from any projects.\n\t */\n\tDELETE FROM employee_project\n\tWHERE emp_no = :emp_num;\n\n\t/*\n\t *\tDelete old salary records.\n\t */\n\tDELETE FROM salary_history\n\tWHERE emp_no = :emp_num;\n\n\t/*\n\t *\tDelete the employee.\n\t */\n\tDELETE FROM employee\n\tWHERE emp_no = :emp_num;\n\n\tSUSPEND;\nEND", + 'ALTER PROCEDURE DEPT_BUDGET (DNO CHAR(3))\nRETURNS (TOT DECIMAL(12, 2))\nAS\nDECLARE VARIABLE sumb 
DECIMAL(12, 2);\n\tDECLARE VARIABLE rdno CHAR(3);\n\tDECLARE VARIABLE cnt INTEGER;\nBEGIN\n\ttot = 0;\n\n\tSELECT budget FROM department WHERE dept_no = :dno INTO :tot;\n\n\tSELECT count(budget) FROM department WHERE head_dept = :dno INTO :cnt;\n\n\tIF (cnt = 0) THEN\n\t\tSUSPEND;\n\n\tFOR SELECT dept_no\n\t\tFROM department\n\t\tWHERE head_dept = :dno\n\t\tINTO :rdno\n\tDO\n\t\tBEGIN\n\t\t\tEXECUTE PROCEDURE dept_budget :rdno RETURNING_VALUES :sumb;\n\t\t\ttot = tot + sumb;\n\t\tEND\n\n\tSUSPEND;\nEND', + "ALTER PROCEDURE ORG_CHART\nRETURNS (\n HEAD_DEPT CHAR(25),\n DEPARTMENT CHAR(25),\n MNGR_NAME CHAR(20),\n TITLE CHAR(5),\n EMP_CNT INTEGER\n)\nAS\nDECLARE VARIABLE mngr_no INTEGER;\n\tDECLARE VARIABLE dno CHAR(3);\nBEGIN\n\tFOR SELECT h.department, d.department, d.mngr_no, d.dept_no\n\t\tFROM department d\n\t\tLEFT OUTER JOIN department h ON d.head_dept = h.dept_no\n\t\tORDER BY d.dept_no\n\t\tINTO :head_dept, :department, :mngr_no, :dno\n\tDO\n\tBEGIN\n\t\tIF (:mngr_no IS NULL) THEN\n\t\tBEGIN\n\t\t\tmngr_name = '--TBH--';\n\t\t\ttitle = '';\n\t\tEND\n\n\t\tELSE\n\t\t\tSELECT full_name, job_code\n\t\t\tFROM employee\n\t\t\tWHERE emp_no = :mngr_no\n\t\t\tINTO :mngr_name, :title;\n\n\t\tSELECT COUNT(emp_no)\n\t\tFROM employee\n\t\tWHERE dept_no = :dno\n\t\tINTO :emp_cnt;\n\n\t\tSUSPEND;\n\tEND\nEND", + "ALTER PROCEDURE MAIL_LABEL (CUST_NO INTEGER)\nRETURNS (\n LINE1 CHAR(40),\n LINE2 CHAR(40),\n LINE3 CHAR(40),\n LINE4 CHAR(40),\n LINE5 CHAR(40),\n LINE6 CHAR(40)\n)\nAS\nDECLARE VARIABLE customer\tVARCHAR(25);\n\tDECLARE VARIABLE first_name\t\tVARCHAR(15);\n\tDECLARE VARIABLE last_name\t\tVARCHAR(20);\n\tDECLARE VARIABLE addr1\t\tVARCHAR(30);\n\tDECLARE VARIABLE addr2\t\tVARCHAR(30);\n\tDECLARE VARIABLE city\t\tVARCHAR(25);\n\tDECLARE VARIABLE state\t\tVARCHAR(15);\n\tDECLARE VARIABLE country\tVARCHAR(15);\n\tDECLARE VARIABLE postcode\tVARCHAR(12);\n\tDECLARE VARIABLE cnt\t\tINTEGER;\nBEGIN\n\tline1 = '';\n\tline2 = '';\n\tline3 = '';\n\tline4 = '';\n\tline5 = '';\n\tline6 = '';\n\n\tSELECT customer, contact_first, contact_last, address_line1,\n\t\taddress_line2, city, state_province, country, postal_code\n\tFROM CUSTOMER\n\tWHERE cust_no = :cust_no\n\tINTO :customer, :first_name, :last_name, :addr1, :addr2,\n\t\t:city, :state, :country, :postcode;\n\n\tIF (customer IS NOT NULL) THEN\n\t\tline1 = customer;\n\tIF (first_name IS NOT NULL) THEN\n\t\tline2 = first_name || ' ' || last_name;\n\tELSE\n\t\tline2 = last_name;\n\tIF (addr1 IS NOT NULL) THEN\n\t\tline3 = addr1;\n\tIF (addr2 IS NOT NULL) THEN\n\t\tline4 = addr2;\n\n\tIF (country = 'USA') THEN\n\tBEGIN\n\t\tIF (city IS NOT NULL) THEN\n\t\t\tline5 = city || ', ' || state || ' ' || postcode;\n\t\tELSE\n\t\t\tline5 = state || ' ' || postcode;\n\tEND\n\tELSE\n\tBEGIN\n\t\tIF (city IS NOT NULL) THEN\n\t\t\tline5 = city || ', ' || state;\n\t\tELSE\n\t\t\tline5 = state;\n\t\tline6 = country || ' ' || postcode;\n\tEND\n\n\tSUSPEND;\nEND", + "ALTER PROCEDURE SHIP_ORDER (PO_NUM CHAR(8))\nAS\nDECLARE VARIABLE ord_stat CHAR(7);\n\tDECLARE VARIABLE hold_stat CHAR(1);\n\tDECLARE VARIABLE cust_no INTEGER;\n\tDECLARE VARIABLE any_po CHAR(8);\nBEGIN\n\tSELECT s.order_status, c.on_hold, c.cust_no\n\tFROM sales s, customer c\n\tWHERE po_number = :po_num\n\tAND s.cust_no = c.cust_no\n\tINTO :ord_stat, :hold_stat, :cust_no;\n\n\t/* This purchase order has been already shipped. */\n\tIF (ord_stat = 'shipped') THEN\n\tBEGIN\n\t\tEXCEPTION order_already_shipped;\n\t\tSUSPEND;\n\tEND\n\n\t/*\tCustomer is on hold. 
*/\n\tELSE IF (hold_stat = '*') THEN\n\tBEGIN\n\t\tEXCEPTION customer_on_hold;\n\t\tSUSPEND;\n\tEND\n\n\t/*\n\t *\tIf there is an unpaid balance on orders shipped over 2 months ago,\n\t *\tput the customer on hold.\n\t */\n\tFOR SELECT po_number\n\t\tFROM sales\n\t\tWHERE cust_no = :cust_no\n\t\tAND order_status = 'shipped'\n\t\tAND paid = 'n'\n\t\tAND ship_date < CAST('NOW' AS TIMESTAMP) - 60\n\t\tINTO :any_po\n\tDO\n\tBEGIN\n\t\tEXCEPTION customer_check;\n\n\t\tUPDATE customer\n\t\tSET on_hold = '*'\n\t\tWHERE cust_no = :cust_no;\n\n\t\tSUSPEND;\n\tEND\n\n\t/*\n\t *\tShip the order.\n\t */\n\tUPDATE sales\n\tSET order_status = 'shipped', ship_date = 'NOW'\n\tWHERE po_number = :po_num;\n\n\tSUSPEND;\nEND", + "ALTER PROCEDURE SHOW_LANGS (\n CODE VARCHAR(5),\n GRADE SMALLINT,\n CTY VARCHAR(15)\n)\nRETURNS (LANGUAGES VARCHAR(15))\nAS\nDECLARE VARIABLE i INTEGER;\nBEGIN\n i = 1;\n WHILE (i <= 5) DO\n BEGIN\n SELECT language_req[:i] FROM joB\n WHERE ((job_code = :code) AND (job_grade = :grade) AND (job_country = :cty)\n AND (language_req IS NOT NULL))\n INTO :languages;\n IF (languages = ' ') THEN /* Prints 'NULL' instead of blanks */\n languages = 'NULL'; \n i = i +1;\n SUSPEND;\n END\nEND", + "ALTER PROCEDURE ALL_LANGS\nRETURNS (\n CODE VARCHAR(5),\n GRADE VARCHAR(5),\n COUNTRY VARCHAR(15),\n LANG VARCHAR(15)\n)\nAS\nBEGIN\n\tFOR SELECT job_code, job_grade, job_country FROM job \n\t\tINTO :code, :grade, :country\n\n\tDO\n\tBEGIN\n\t FOR SELECT languages FROM show_langs \n \t\t (:code, :grade, :country) INTO :lang DO\n\t SUSPEND;\n\t /* Put nice separators between rows */\n\t code = '=====';\n\t grade = '=====';\n\t country = '===============';\n\t lang = '==============';\n\t SUSPEND;\n\tEND\n END"]) + script = s.get_metadata_ddl([sm.SCRIPT_TRIGGERS]) + if self.version == FB30: + self.assertListEqual(script, ['CREATE TRIGGER SET_EMP_NO FOR EMPLOYEE ACTIVE\nBEFORE INSERT POSITION 0\nAS\nBEGIN\n if (new.emp_no is null) then\n new.emp_no = gen_id(emp_no_gen, 1);\nEND', + "CREATE TRIGGER SAVE_SALARY_CHANGE FOR EMPLOYEE ACTIVE\nAFTER UPDATE POSITION 0\nAS\nBEGIN\n IF (old.salary <> new.salary) THEN\n INSERT INTO salary_history\n (emp_no, change_date, updater_id, old_salary, percent_change)\n VALUES (\n old.emp_no,\n 'NOW',\n user,\n old.salary,\n (new.salary - old.salary) * 100 / old.salary);\nEND", + 'CREATE TRIGGER SET_CUST_NO FOR CUSTOMER ACTIVE\nBEFORE INSERT POSITION 0\nAS\nBEGIN\n if (new.cust_no is null) then\n new.cust_no = gen_id(cust_no_gen, 1);\nEND', + "CREATE TRIGGER POST_NEW_ORDER FOR SALES ACTIVE\nAFTER INSERT POSITION 0\nAS\nBEGIN\n POST_EVENT 'new_order';\nEND", + 'CREATE TRIGGER TR_CONNECT ACTIVE\nON CONNECT POSITION 0\nAS \nBEGIN \n /* enter trigger code here */ \nEND', + 'CREATE TRIGGER TR_MULTI FOR COUNTRY ACTIVE\nAFTER INSERT OR UPDATE OR DELETE POSITION 0\nAS \nBEGIN \n /* enter trigger code here */ \nEND']) + else: + self.assertListEqual(script, ['CREATE TRIGGER SET_EMP_NO FOR EMPLOYEE ACTIVE\nBEFORE INSERT POSITION 0\nAS\nBEGIN\n if (new.emp_no is null) then\n new.emp_no = gen_id(emp_no_gen, 1);\nEND', + "CREATE TRIGGER SAVE_SALARY_CHANGE FOR EMPLOYEE ACTIVE\nAFTER UPDATE POSITION 0\nAS\nBEGIN\n IF (old.salary <> new.salary) THEN\n INSERT INTO salary_history\n (emp_no, change_date, updater_id, old_salary, percent_change)\n VALUES (\n old.emp_no,\n 'NOW',\n user,\n old.salary,\n (new.salary - old.salary) * 100 / old.salary);\nEND", + 'CREATE TRIGGER SET_CUST_NO FOR CUSTOMER ACTIVE\nBEFORE INSERT POSITION 0\nAS\nBEGIN\n if (new.cust_no is null) then\n new.cust_no = 
gen_id(cust_no_gen, 1);\nEND', + "CREATE TRIGGER POST_NEW_ORDER FOR SALES ACTIVE\nAFTER INSERT POSITION 0\nAS\nBEGIN\n POST_EVENT 'new_order';\nEND", + 'CREATE TRIGGER TR_MULTI FOR COUNTRY ACTIVE\nAFTER INSERT OR UPDATE OR DELETE POSITION 0\nAS \nBEGIN \n /* enter trigger code here */ \nEND', + 'CREATE TRIGGER TR_CONNECT ACTIVE\nON CONNECT POSITION 0\nAS \nBEGIN \n /* enter trigger code here */ \nEND']) + script = s.get_metadata_ddl([sm.SCRIPT_ROLES]) + self.assertListEqual(script, ['CREATE ROLE TEST_ROLE']) + script = s.get_metadata_ddl([sm.SCRIPT_GRANTS]) + self.assertListEqual(script, ['GRANT SELECT ON COUNTRY TO PUBLIC WITH GRANT OPTION', + 'GRANT INSERT ON COUNTRY TO PUBLIC WITH GRANT OPTION', + 'GRANT UPDATE ON COUNTRY TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE ON COUNTRY TO PUBLIC WITH GRANT OPTION', + 'GRANT REFERENCES ON COUNTRY TO PUBLIC WITH GRANT OPTION', + 'GRANT SELECT ON JOB TO PUBLIC WITH GRANT OPTION', + 'GRANT INSERT ON JOB TO PUBLIC WITH GRANT OPTION', + 'GRANT UPDATE ON JOB TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE ON JOB TO PUBLIC WITH GRANT OPTION', + 'GRANT REFERENCES ON JOB TO PUBLIC WITH GRANT OPTION', + 'GRANT SELECT ON DEPARTMENT TO PUBLIC WITH GRANT OPTION', + 'GRANT INSERT ON DEPARTMENT TO PUBLIC WITH GRANT OPTION', + 'GRANT UPDATE ON DEPARTMENT TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE ON DEPARTMENT TO PUBLIC WITH GRANT OPTION', + 'GRANT REFERENCES ON DEPARTMENT TO PUBLIC WITH GRANT OPTION', + 'GRANT SELECT ON EMPLOYEE TO PUBLIC WITH GRANT OPTION', + 'GRANT INSERT ON EMPLOYEE TO PUBLIC WITH GRANT OPTION', + 'GRANT UPDATE ON EMPLOYEE TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE ON EMPLOYEE TO PUBLIC WITH GRANT OPTION', + 'GRANT REFERENCES ON EMPLOYEE TO PUBLIC WITH GRANT OPTION', + 'GRANT SELECT ON PHONE_LIST TO PUBLIC WITH GRANT OPTION', + 'GRANT INSERT ON PHONE_LIST TO PUBLIC WITH GRANT OPTION', + 'GRANT UPDATE ON PHONE_LIST TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE ON PHONE_LIST TO PUBLIC WITH GRANT OPTION', + 'GRANT REFERENCES ON PHONE_LIST TO PUBLIC WITH GRANT OPTION', + 'GRANT SELECT ON PROJECT TO PUBLIC WITH GRANT OPTION', + 'GRANT INSERT ON PROJECT TO PUBLIC WITH GRANT OPTION', + 'GRANT UPDATE ON PROJECT TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE ON PROJECT TO PUBLIC WITH GRANT OPTION', + 'GRANT REFERENCES ON PROJECT TO PUBLIC WITH GRANT OPTION', + 'GRANT SELECT ON EMPLOYEE_PROJECT TO PUBLIC WITH GRANT OPTION', + 'GRANT INSERT ON EMPLOYEE_PROJECT TO PUBLIC WITH GRANT OPTION', + 'GRANT UPDATE ON EMPLOYEE_PROJECT TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE ON EMPLOYEE_PROJECT TO PUBLIC WITH GRANT OPTION', + 'GRANT REFERENCES ON EMPLOYEE_PROJECT TO PUBLIC WITH GRANT OPTION', + 'GRANT SELECT ON PROJ_DEPT_BUDGET TO PUBLIC WITH GRANT OPTION', + 'GRANT INSERT ON PROJ_DEPT_BUDGET TO PUBLIC WITH GRANT OPTION', + 'GRANT UPDATE ON PROJ_DEPT_BUDGET TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE ON PROJ_DEPT_BUDGET TO PUBLIC WITH GRANT OPTION', + 'GRANT REFERENCES ON PROJ_DEPT_BUDGET TO PUBLIC WITH GRANT OPTION', + 'GRANT SELECT ON SALARY_HISTORY TO PUBLIC WITH GRANT OPTION', + 'GRANT INSERT ON SALARY_HISTORY TO PUBLIC WITH GRANT OPTION', + 'GRANT UPDATE ON SALARY_HISTORY TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE ON SALARY_HISTORY TO PUBLIC WITH GRANT OPTION', + 'GRANT REFERENCES ON SALARY_HISTORY TO PUBLIC WITH GRANT OPTION', + 'GRANT SELECT ON CUSTOMER TO PUBLIC WITH GRANT OPTION', + 'GRANT INSERT ON CUSTOMER TO PUBLIC WITH GRANT OPTION', + 'GRANT UPDATE ON CUSTOMER TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE ON CUSTOMER TO PUBLIC WITH 
GRANT OPTION', + 'GRANT REFERENCES ON CUSTOMER TO PUBLIC WITH GRANT OPTION', + 'GRANT SELECT ON SALES TO PUBLIC WITH GRANT OPTION', + 'GRANT INSERT ON SALES TO PUBLIC WITH GRANT OPTION', + 'GRANT UPDATE ON SALES TO PUBLIC WITH GRANT OPTION', + 'GRANT DELETE ON SALES TO PUBLIC WITH GRANT OPTION', + 'GRANT REFERENCES ON SALES TO PUBLIC WITH GRANT OPTION', + 'GRANT EXECUTE ON PROCEDURE GET_EMP_PROJ TO PUBLIC WITH GRANT OPTION', + 'GRANT EXECUTE ON PROCEDURE ADD_EMP_PROJ TO PUBLIC WITH GRANT OPTION', + 'GRANT EXECUTE ON PROCEDURE SUB_TOT_BUDGET TO PUBLIC WITH GRANT OPTION', + 'GRANT EXECUTE ON PROCEDURE DELETE_EMPLOYEE TO PUBLIC WITH GRANT OPTION', + 'GRANT EXECUTE ON PROCEDURE DEPT_BUDGET TO PUBLIC WITH GRANT OPTION', + 'GRANT EXECUTE ON PROCEDURE ORG_CHART TO PUBLIC WITH GRANT OPTION', + 'GRANT EXECUTE ON PROCEDURE MAIL_LABEL TO PUBLIC WITH GRANT OPTION', + 'GRANT EXECUTE ON PROCEDURE SHIP_ORDER TO PUBLIC WITH GRANT OPTION', + 'GRANT EXECUTE ON PROCEDURE SHOW_LANGS TO PUBLIC WITH GRANT OPTION', + 'GRANT EXECUTE ON PROCEDURE ALL_LANGS TO PUBLIC WITH GRANT OPTION']) + script = s.get_metadata_ddl([sm.SCRIPT_COMMENTS]) + self.assertListEqual(script, ["COMMENT ON CHARACTER SET NONE IS 'Comment on NONE character set'"]) + script = s.get_metadata_ddl([sm.SCRIPT_SHADOWS]) + self.assertListEqual(script, []) + script = s.get_metadata_ddl([sm.SCRIPT_INDEX_DEACTIVATIONS]) + if self.version == FB30: + self.assertListEqual(script, ['ALTER INDEX MINSALX INACTIVE', + 'ALTER INDEX MAXSALX INACTIVE', + 'ALTER INDEX BUDGETX INACTIVE', + 'ALTER INDEX NAMEX INACTIVE', + 'ALTER INDEX PRODTYPEX INACTIVE', + 'ALTER INDEX UPDATERX INACTIVE', + 'ALTER INDEX CHANGEX INACTIVE', + 'ALTER INDEX CUSTNAMEX INACTIVE', + 'ALTER INDEX CUSTREGION INACTIVE', + 'ALTER INDEX NEEDX INACTIVE', + 'ALTER INDEX SALESTATX INACTIVE', + 'ALTER INDEX QTYX INACTIVE']) + else: + self.assertListEqual(script, ['ALTER INDEX NEEDX INACTIVE', + 'ALTER INDEX SALESTATX INACTIVE', + 'ALTER INDEX QTYX INACTIVE', + 'ALTER INDEX UPDATERX INACTIVE', + 'ALTER INDEX CHANGEX INACTIVE', + 'ALTER INDEX PRODTYPEX INACTIVE', + 'ALTER INDEX CUSTNAMEX INACTIVE', + 'ALTER INDEX CUSTREGION INACTIVE', + 'ALTER INDEX NAMEX INACTIVE', + 'ALTER INDEX BUDGETX INACTIVE', + 'ALTER INDEX MINSALX INACTIVE', + 'ALTER INDEX MAXSALX INACTIVE']) + script = s.get_metadata_ddl([sm.SCRIPT_INDEX_ACTIVATIONS]) + if self.version == FB30: + self.assertListEqual(script, ['ALTER INDEX MINSALX ACTIVE', + 'ALTER INDEX MAXSALX ACTIVE', + 'ALTER INDEX BUDGETX ACTIVE', + 'ALTER INDEX NAMEX ACTIVE', + 'ALTER INDEX PRODTYPEX ACTIVE', + 'ALTER INDEX UPDATERX ACTIVE', + 'ALTER INDEX CHANGEX ACTIVE', + 'ALTER INDEX CUSTNAMEX ACTIVE', + 'ALTER INDEX CUSTREGION ACTIVE', + 'ALTER INDEX NEEDX ACTIVE', + 'ALTER INDEX SALESTATX ACTIVE', + 'ALTER INDEX QTYX ACTIVE']) + else: + self.assertListEqual(script, ['ALTER INDEX NEEDX ACTIVE', + 'ALTER INDEX SALESTATX ACTIVE', + 'ALTER INDEX QTYX ACTIVE', + 'ALTER INDEX UPDATERX ACTIVE', + 'ALTER INDEX CHANGEX ACTIVE', + 'ALTER INDEX PRODTYPEX ACTIVE', + 'ALTER INDEX CUSTNAMEX ACTIVE', + 'ALTER INDEX CUSTREGION ACTIVE', + 'ALTER INDEX NAMEX ACTIVE', + 'ALTER INDEX BUDGETX ACTIVE', + 'ALTER INDEX MINSALX ACTIVE', + 'ALTER INDEX MAXSALX ACTIVE']) + script = s.get_metadata_ddl([sm.SCRIPT_SET_GENERATORS]) + self.assertListEqual(script, ['ALTER SEQUENCE EMP_NO_GEN RESTART WITH 145', + 'ALTER SEQUENCE CUST_NO_GEN RESTART WITH 1015']) + script = s.get_metadata_ddl([sm.SCRIPT_TRIGGER_DEACTIVATIONS]) + if self.version == FB30: + self.assertListEqual(script, ['ALTER 
TRIGGER SET_EMP_NO INACTIVE', + 'ALTER TRIGGER SAVE_SALARY_CHANGE INACTIVE', + 'ALTER TRIGGER SET_CUST_NO INACTIVE', + 'ALTER TRIGGER POST_NEW_ORDER INACTIVE', + 'ALTER TRIGGER TR_CONNECT INACTIVE', + 'ALTER TRIGGER TR_MULTI INACTIVE']) + else: + self.assertListEqual(script, ['ALTER TRIGGER SET_EMP_NO INACTIVE', + 'ALTER TRIGGER SAVE_SALARY_CHANGE INACTIVE', + 'ALTER TRIGGER SET_CUST_NO INACTIVE', + 'ALTER TRIGGER POST_NEW_ORDER INACTIVE', + 'ALTER TRIGGER TR_MULTI INACTIVE', + 'ALTER TRIGGER TR_CONNECT INACTIVE']) + script = s.get_metadata_ddl([sm.SCRIPT_TRIGGER_ACTIVATIONS]) + if self.version == FB30: + self.assertListEqual(script, ['ALTER TRIGGER SET_EMP_NO ACTIVE', + 'ALTER TRIGGER SAVE_SALARY_CHANGE ACTIVE', + 'ALTER TRIGGER SET_CUST_NO ACTIVE', + 'ALTER TRIGGER POST_NEW_ORDER ACTIVE', + 'ALTER TRIGGER TR_CONNECT ACTIVE', + 'ALTER TRIGGER TR_MULTI ACTIVE']) + else: + self.assertListEqual(script, ['ALTER TRIGGER SET_EMP_NO ACTIVE', + 'ALTER TRIGGER SAVE_SALARY_CHANGE ACTIVE', + 'ALTER TRIGGER SET_CUST_NO ACTIVE', + 'ALTER TRIGGER POST_NEW_ORDER ACTIVE', + 'ALTER TRIGGER TR_MULTI ACTIVE', + 'ALTER TRIGGER TR_CONNECT ACTIVE']) class TestMonitor(FDBTestBase): def setUp(self): - super(TestMonitor,self).setUp() - self.cwd = os.getcwd() - self.dbpath = os.path.join(self.cwd,'test') - self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB) - self.con = fdb.connect(host=FBTEST_HOST,database=self.dbfile, - user=FBTEST_USER,password=FBTEST_PASSWORD) + super(TestMonitor, self).setUp() + self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB) + self.con = fdb.connect(host=FBTEST_HOST, database=self.dbfile, + user=FBTEST_USER, password=FBTEST_PASSWORD) def tearDown(self): self.con.close() def testMonitorBindClose(self): @@ -5332,7 +5864,7 @@ self.assertTrue(s.closed) s.bind(self.con) # properties - self.assertEqual(s.db.name.upper(),self.dbfile.upper()) + self.assertEqual(s.db.name.upper(), self.dbfile.upper()) self.assertFalse(s.db.read_only) self.assertFalse(s.closed) # @@ -5355,33 +5887,33 @@ m = self.con.monitor m.refresh() self.assertIsNotNone(m.db) - self.assertIsInstance(m.db,fdb.monitor.DatabaseInfo) - self.assertGreater(len(m.attachments),0) - self.assertIsInstance(m.attachments[0],fdb.monitor.AttachmentInfo) - self.assertGreater(len(m.transactions),0) - self.assertIsInstance(m.transactions[0],fdb.monitor.TransactionInfo) - self.assertGreater(len(m.statements),0) - self.assertIsInstance(m.statements[0],fdb.monitor.StatementInfo) - self.assertEqual(len(m.callstack),0) - self.assertGreater(len(m.iostats),0) - self.assertIsInstance(m.iostats[0],fdb.monitor.IOStatsInfo) + self.assertIsInstance(m.db, fdb.monitor.DatabaseInfo) + self.assertGreater(len(m.attachments), 0) + self.assertIsInstance(m.attachments[0], fdb.monitor.AttachmentInfo) + self.assertGreater(len(m.transactions), 0) + self.assertIsInstance(m.transactions[0], fdb.monitor.TransactionInfo) + self.assertGreater(len(m.statements), 0) + self.assertIsInstance(m.statements[0], fdb.monitor.StatementInfo) + self.assertEqual(len(m.callstack), 0) + self.assertGreater(len(m.iostats), 0) + self.assertIsInstance(m.iostats[0], fdb.monitor.IOStatsInfo) if self.con.ods == fdb.ODS_FB_21: - self.assertEqual(len(m.variables),0) + self.assertEqual(len(m.variables), 0) elif self.con.ods >= fdb.ODS_FB_25: - self.assertGreater(len(m.variables),0) - self.assertIsInstance(m.variables[0],fdb.monitor.ContextVariableInfo) + self.assertGreater(len(m.variables), 0) + self.assertIsInstance(m.variables[0], fdb.monitor.ContextVariableInfo) # att_id = 
m._con.db_info(fdb.isc_info_attachment_id) - self.assertEqual(m.get_attachment(att_id).id,att_id) + self.assertEqual(m.get_attachment(att_id).id, att_id) tra_id = m._con.trans_info(fdb.isc_info_tra_id) - self.assertEqual(m.get_transaction(tra_id).id,tra_id) + self.assertEqual(m.get_transaction(tra_id).id, tra_id) stmt_id = None for stmt in m.statements: if stmt.sql_text == sql: stmt_id = stmt.id - self.assertEqual(m.get_statement(stmt_id).id,stmt_id) + self.assertEqual(m.get_statement(stmt_id).id, stmt_id) # m.get_call() - self.assertIsInstance(m.this_attachment,fdb.monitor.AttachmentInfo) + self.assertIsInstance(m.this_attachment, fdb.monitor.AttachmentInfo) self.assertEqual(m.this_attachment.id, self.con.db_info(fdb.isc_info_attachment_id)) self.assertFalse(m.closed) @@ -5399,48 +5931,48 @@ return m = self.con.monitor m.refresh() - self.assertEqual(m.db.name.upper(),self.dbfile.upper()) + self.assertEqual(m.db.name.upper(), self.dbfile.upper()) if self.con.ods < fdb.ODS_FB_30: - self.assertEqual(m.db.page_size,4096) + self.assertEqual(m.db.page_size, 4096) else: - self.assertEqual(m.db.page_size,8192) + self.assertEqual(m.db.page_size, 8192) if self.con.ods == fdb.ODS_FB_20: - self.assertEqual(m.db.ods,11.0) + self.assertEqual(m.db.ods, 11.0) elif self.con.ods == fdb.ODS_FB_21: - self.assertEqual(m.db.ods,11.1) + self.assertEqual(m.db.ods, 11.1) elif self.con.ods == fdb.ODS_FB_25: - self.assertEqual(m.db.ods,11.2) + self.assertEqual(m.db.ods, 11.2) elif self.con.ods >= fdb.ODS_FB_30: - self.assertEqual(m.db.ods,12.0) - self.assertIsInstance(m.db.oit,int) - self.assertIsInstance(m.db.oat,int) - self.assertIsInstance(m.db.ost,int) - self.assertIsInstance(m.db.next_transaction,int) - self.assertIsInstance(m.db.cache_size,int) - self.assertEqual(m.db.sql_dialect,3) - self.assertEqual(m.db.shutdown_mode,fdb.monitor.SHUTDOWN_MODE_ONLINE) - self.assertEqual(m.db.sweep_interval,20000) + self.assertEqual(m.db.ods, 12.0) + self.assertIsInstance(m.db.oit, int) + self.assertIsInstance(m.db.oat, int) + self.assertIsInstance(m.db.ost, int) + self.assertIsInstance(m.db.next_transaction, int) + self.assertIsInstance(m.db.cache_size, int) + self.assertEqual(m.db.sql_dialect, 3) + self.assertEqual(m.db.shutdown_mode, fdb.monitor.SHUTDOWN_MODE_ONLINE) + self.assertEqual(m.db.sweep_interval, 20000) self.assertFalse(m.db.read_only) self.assertTrue(m.db.forced_writes) self.assertTrue(m.db.reserve_space) - self.assertIsInstance(m.db.created,datetime.datetime) - self.assertIsInstance(m.db.pages,int) - self.assertEqual(m.db.backup_state,fdb.monitor.BACKUP_STATE_NORMAL) + self.assertIsInstance(m.db.created, datetime.datetime) + self.assertIsInstance(m.db.pages, int) + self.assertEqual(m.db.backup_state, fdb.monitor.BACKUP_STATE_NORMAL) if self.con.ods < fdb.ODS_FB_30: self.assertIsNone(m.db.crypt_page) self.assertIsNone(m.db.owner) self.assertIsNone(m.db.security_database) else: - self.assertEqual(m.db.crypt_page,0) - self.assertEqual(m.db.owner,'SYSDBA') - self.assertEqual(m.db.security_database,'Default') - self.assertEqual(m.db.iostats.group,fdb.monitor.STAT_DATABASE) - self.assertEqual(m.db.iostats.stat_id,m.db.stat_id) - self.assertIsInstance(m.db.tablestats,types.DictionaryType) + self.assertEqual(m.db.crypt_page, 0) + self.assertEqual(m.db.owner, 'SYSDBA') + self.assertEqual(m.db.security_database, 'Default') + self.assertEqual(m.db.iostats.group, fdb.monitor.STAT_DATABASE) + self.assertEqual(m.db.iostats.stat_id, m.db.stat_id) + self.assertIsInstance(m.db.tablestats, dict) if self.con.ods < fdb.ODS_FB_30: 
- self.assertEqual(len(m.db.tablestats),0) + self.assertEqual(len(m.db.tablestats), 0) else: - self.assertGreater(len(m.db.tablestats),0) + self.assertGreater(len(m.db.tablestats), 0) def testAttachmentInfo(self): if self.con.ods < fdb.ODS_FB_21: return @@ -5452,25 +5984,25 @@ m.refresh() s = m.this_attachment # - self.assertEqual(s.id,self.con.db_info(fdb.isc_info_attachment_id)) - self.assertIsInstance(s.server_pid,int) - self.assertIn(s.state,[fdb.monitor.STATE_ACTIVE,fdb.monitor.STATE_IDLE]) - self.assertEqual(s.name.upper(),self.dbfile.upper()) - self.assertEqual(s.user,'SYSDBA') - self.assertEqual(s.role,'NONE') + self.assertEqual(s.id, self.con.db_info(fdb.isc_info_attachment_id)) + self.assertIsInstance(s.server_pid, int) + self.assertIn(s.state, [fdb.monitor.STATE_ACTIVE, fdb.monitor.STATE_IDLE]) + self.assertEqual(s.name.upper(), self.dbfile.upper()) + self.assertEqual(s.user, 'SYSDBA') + self.assertEqual(s.role, 'NONE') if not FBTEST_HOST and self.con.engine_version >= 3.0: self.assertIsNone(s.remote_protocol) self.assertIsNone(s.remote_address) self.assertIsNone(s.remote_pid) self.assertIsNone(s.remote_process) else: - self.assertIn(s.remote_protocol,['XNET','TCPv4','TCPv6']) - self.assertIsInstance(s.remote_address,str) - self.assertIsInstance(s.remote_pid,int) - self.assertIsInstance(s.remote_process,str) - self.assertIsInstance(s.character_set,sm.CharacterSet) - self.assertIsInstance(s.timestamp,datetime.datetime) - self.assertIsInstance(s.transactions,list) + self.assertIn(s.remote_protocol, ['XNET', 'TCPv4', 'TCPv6']) + self.assertIsInstance(s.remote_address, str) + self.assertIsInstance(s.remote_pid, int) + self.assertIsInstance(s.remote_process, str) + self.assertIsInstance(s.character_set, sm.CharacterSet) + self.assertIsInstance(s.timestamp, datetime.datetime) + self.assertIsInstance(s.transactions, list) if self.con.ods < fdb.ODS_FB_30: self.assertIsNone(s.auth_method) self.assertIsNone(s.client_version) @@ -5478,29 +6010,29 @@ self.assertIsNone(s.remote_os_user) self.assertIsNone(s.remote_host) else: - self.assertIn(s.auth_method,['Srp','Win_Sspi','Legacy_Auth']) - self.assertIsInstance(s.client_version,str) - self.assertEqual(s.remote_version,'P13') - self.assertIsInstance(s.remote_os_user,str) - self.assertIsInstance(s.remote_host,str) + self.assertIn(s.auth_method, ['Srp', 'Win_Sspi', 'Legacy_Auth']) + self.assertIsInstance(s.client_version, str) + self.assertEqual(s.remote_version, 'P15') # Firebird 3.0.3, may fail with other versions + self.assertIsInstance(s.remote_os_user, str) + self.assertIsInstance(s.remote_host, str) for x in s.transactions: - self.assertIsInstance(x,fdb.monitor.TransactionInfo) - self.assertIsInstance(s.statements,list) + self.assertIsInstance(x, fdb.monitor.TransactionInfo) + self.assertIsInstance(s.statements, list) for x in s.statements: - self.assertIsInstance(x,fdb.monitor.StatementInfo) - self.assertIsInstance(s.variables,list) + self.assertIsInstance(x, fdb.monitor.StatementInfo) + self.assertIsInstance(s.variables, list) if self.con.ods >= fdb.ODS_FB_25: - self.assertGreater(len(s.variables),0) + self.assertGreater(len(s.variables), 0) else: - self.assertEqual(len(s.variables),0) + self.assertEqual(len(s.variables), 0) for x in s.variables: - self.assertIsInstance(x,fdb.monitor.ContextVariableInfo) - self.assertEqual(s.iostats.group,fdb.monitor.STAT_ATTACHMENT) - self.assertEqual(s.iostats.stat_id,s.stat_id) + self.assertIsInstance(x, fdb.monitor.ContextVariableInfo) + self.assertEqual(s.iostats.group, fdb.monitor.STAT_ATTACHMENT) 
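The monitoring assertions in this and the following hunks all start from the same entry point: on servers with ODS >= 11.1 (Firebird 2.1+, as the early-return guards above imply), Connection.monitor exposes the MON$ virtual tables, refresh() takes a fresh snapshot, and each snapshot object carries an iostats entry tagged with one of the STAT_* constants checked here. A rough sketch of that flow (connection parameters are placeholders):

import fdb

con = fdb.connect(dsn='employee', user='SYSDBA', password='masterkey')
m = con.monitor
m.refresh()                                  # snapshot the MON$ tables
print(m.db.name, m.db.page_size, m.db.ods)   # DatabaseInfo, as asserted above
for att in m.attachments:                    # one AttachmentInfo per attachment
    print(att.id, att.user, att.state)
io = m.db.iostats                            # database-wide IOStatsInfo
print(io.group == fdb.monitor.STAT_DATABASE, io.reads, io.writes)
con.close()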
+ self.assertEqual(s.iostats.stat_id, s.stat_id) if self.con.ods < fdb.ODS_FB_30: - self.assertEqual(len(m.db.tablestats),0) + self.assertEqual(len(m.db.tablestats), 0) else: - self.assertGreater(len(m.db.tablestats),0) + self.assertGreater(len(m.db.tablestats), 0) # self.assertTrue(s.isactive()) self.assertFalse(s.isidle()) @@ -5517,25 +6049,25 @@ m.refresh() s = m.this_attachment.transactions[0] # - self.assertEqual(s.id,m._ic.transaction.trans_info(fdb.isc_info_tra_id)) - self.assertIs(s.attachment,m.this_attachment) - self.assertIn(s.state,[fdb.monitor.STATE_ACTIVE,fdb.monitor.STATE_IDLE]) - self.assertIsInstance(s.timestamp,datetime.datetime) - self.assertIsInstance(s.top,int) - self.assertIsInstance(s.oldest,int) - self.assertIsInstance(s.oldest_active,int) - self.assertEqual(s.isolation_mode,fdb.monitor.ISOLATION_READ_COMMITTED_RV) - self.assertEqual(s.lock_timeout,fdb.monitor.INFINITE_WAIT) - self.assertIsInstance(s.statements,list) + self.assertEqual(s.id, m._ic.transaction.trans_info(fdb.isc_info_tra_id)) + self.assertIs(s.attachment, m.this_attachment) + self.assertIn(s.state, [fdb.monitor.STATE_ACTIVE, fdb.monitor.STATE_IDLE]) + self.assertIsInstance(s.timestamp, datetime.datetime) + self.assertIsInstance(s.top, int) + self.assertIsInstance(s.oldest, int) + self.assertIsInstance(s.oldest_active, int) + self.assertEqual(s.isolation_mode, fdb.monitor.ISOLATION_READ_COMMITTED_RV) + self.assertEqual(s.lock_timeout, fdb.monitor.INFINITE_WAIT) + self.assertIsInstance(s.statements, list) for x in s.statements: - self.assertIsInstance(x,fdb.monitor.StatementInfo) - self.assertIsInstance(s.variables,list) - self.assertEqual(s.iostats.group,fdb.monitor.STAT_TRANSACTION) - self.assertEqual(s.iostats.stat_id,s.stat_id) + self.assertIsInstance(x, fdb.monitor.StatementInfo) + self.assertIsInstance(s.variables, list) + self.assertEqual(s.iostats.group, fdb.monitor.STAT_TRANSACTION) + self.assertEqual(s.iostats.stat_id, s.stat_id) if self.con.ods < fdb.ODS_FB_30: - self.assertEqual(len(m.db.tablestats),0) + self.assertEqual(len(m.db.tablestats), 0) else: - self.assertGreater(len(m.db.tablestats),0) + self.assertGreater(len(m.db.tablestats), 0) # self.assertTrue(s.isactive()) self.assertFalse(s.isidle()) @@ -5544,13 +6076,13 @@ self.assertTrue(s.isautoundo()) # s = m.get_transaction(c.transaction.trans_info(fdb.isc_info_tra_id)) - self.assertIsInstance(s.variables,list) + self.assertIsInstance(s.variables, list) if self.con.ods >= fdb.ODS_FB_25: - self.assertGreater(len(s.variables),0) + self.assertGreater(len(s.variables), 0) else: - self.assertEqual(len(s.variables),0) + self.assertEqual(len(s.variables), 0) for x in s.variables: - self.assertIsInstance(x,fdb.monitor.ContextVariableInfo) + self.assertIsInstance(x, fdb.monitor.ContextVariableInfo) def testStatementInfo(self): if self.con.ods < fdb.ODS_FB_21: return @@ -5558,47 +6090,47 @@ m.refresh() s = m.this_attachment.statements[0] # - self.assertIsInstance(s.id,int) - self.assertIs(s.attachment,m.this_attachment) - self.assertIs(s.transaction,m.transactions[0]) - self.assertIn(s.state,[fdb.monitor.STATE_ACTIVE,fdb.monitor.STATE_IDLE]) - self.assertIsInstance(s.timestamp,datetime.datetime) - self.assertEqual(s.sql_text,"select * from mon$database") + self.assertIsInstance(s.id, int) + self.assertIs(s.attachment, m.this_attachment) + self.assertEqual(s.transaction.id, m.transactions[0].id) + self.assertIn(s.state, [fdb.monitor.STATE_ACTIVE, fdb.monitor.STATE_IDLE]) + self.assertIsInstance(s.timestamp, datetime.datetime) + 
self.assertEqual(s.sql_text, "select * from mon$database") if self.con.ods < fdb.ODS_FB_30: self.assertIsNone(s.plan) else: - self.assertEqual(s.plan,'Select Expression\n -> Table "MON$DATABASE" Full Scan') + self.assertEqual(s.plan, 'Select Expression\n -> Table "MON$DATABASE" Full Scan') # We have to use mocks for callstack - stack = [] + stack = utils.ObjectList() stack.append(fdb.monitor.CallStackInfo(m, - {'MON$CALL_ID':1, 'MON$STATEMENT_ID':s.id-1, 'MON$CALLER_ID':None, - 'MON$OBJECT_NAME':'TRIGGER_1', 'MON$OBJECT_TYPE':2, 'MON$TIMESTAMP':datetime.datetime.now(), - 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':s.stat_id+100})) + {'MON$CALL_ID':1, 'MON$STATEMENT_ID':s.id-1, 'MON$CALLER_ID':None, + 'MON$OBJECT_NAME':'TRIGGER_1', 'MON$OBJECT_TYPE':2, 'MON$TIMESTAMP':datetime.datetime.now(), + 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':s.stat_id+100})) stack.append(fdb.monitor.CallStackInfo(m, - {'MON$CALL_ID':2, 'MON$STATEMENT_ID':s.id, 'MON$CALLER_ID':None, - 'MON$OBJECT_NAME':'TRIGGER_2', 'MON$OBJECT_TYPE':2, 'MON$TIMESTAMP':datetime.datetime.now(), - 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':s.stat_id+101})) + {'MON$CALL_ID':2, 'MON$STATEMENT_ID':s.id, 'MON$CALLER_ID':None, + 'MON$OBJECT_NAME':'TRIGGER_2', 'MON$OBJECT_TYPE':2, 'MON$TIMESTAMP':datetime.datetime.now(), + 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':s.stat_id+101})) stack.append(fdb.monitor.CallStackInfo(m, - {'MON$CALL_ID':3, 'MON$STATEMENT_ID':s.id, 'MON$CALLER_ID':2, - 'MON$OBJECT_NAME':'PROC_1', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), - 'MON$SOURCE_LINE':2, 'MON$SOURCE_COLUMN':2, 'MON$STAT_ID':s.stat_id+102})) + {'MON$CALL_ID':3, 'MON$STATEMENT_ID':s.id, 'MON$CALLER_ID':2, + 'MON$OBJECT_NAME':'PROC_1', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), + 'MON$SOURCE_LINE':2, 'MON$SOURCE_COLUMN':2, 'MON$STAT_ID':s.stat_id+102})) stack.append(fdb.monitor.CallStackInfo(m, - {'MON$CALL_ID':4, 'MON$STATEMENT_ID':s.id, 'MON$CALLER_ID':3, - 'MON$OBJECT_NAME':'PROC_2', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), - 'MON$SOURCE_LINE':3, 'MON$SOURCE_COLUMN':3, 'MON$STAT_ID':s.stat_id+103})) + {'MON$CALL_ID':4, 'MON$STATEMENT_ID':s.id, 'MON$CALLER_ID':3, + 'MON$OBJECT_NAME':'PROC_2', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), + 'MON$SOURCE_LINE':3, 'MON$SOURCE_COLUMN':3, 'MON$STAT_ID':s.stat_id+103})) stack.append(fdb.monitor.CallStackInfo(m, - {'MON$CALL_ID':5, 'MON$STATEMENT_ID':s.id+1, 'MON$CALLER_ID':None, - 'MON$OBJECT_NAME':'PROC_3', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), - 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':s.stat_id+104})) + {'MON$CALL_ID':5, 'MON$STATEMENT_ID':s.id+1, 'MON$CALLER_ID':None, + 'MON$OBJECT_NAME':'PROC_3', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), + 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':s.stat_id+104})) m.__dict__['_Monitor__callstack'] = stack # - self.assertListEqual(s.callstack,[stack[1],stack[2],stack[3]]) - self.assertEqual(s.iostats.group,fdb.monitor.STAT_STATEMENT) - self.assertEqual(s.iostats.stat_id,s.stat_id) + self.assertListEqual(s.callstack, [stack[1], stack[2], stack[3]]) + self.assertEqual(s.iostats.group, fdb.monitor.STAT_STATEMENT) + self.assertEqual(s.iostats.stat_id, s.stat_id) if self.con.ods < fdb.ODS_FB_30: - self.assertEqual(len(m.db.tablestats),0) + self.assertEqual(len(m.db.tablestats), 0) else: - self.assertGreater(len(m.db.tablestats),0) + 
self.assertGreater(len(m.db.tablestats), 0) # self.assertTrue(s.isactive()) self.assertFalse(s.isidle()) @@ -5609,50 +6141,51 @@ m.refresh() stmt = m.this_attachment.statements[0] # We have to use mocks for callstack - stack = [] + stack = utils.ObjectList() stack.append(fdb.monitor.CallStackInfo(m, - {'MON$CALL_ID':1, 'MON$STATEMENT_ID':stmt.id-1, 'MON$CALLER_ID':None, - 'MON$OBJECT_NAME':'POST_NEW_ORDER', 'MON$OBJECT_TYPE':2, 'MON$TIMESTAMP':datetime.datetime.now(), - 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':stmt.stat_id+100})) + {'MON$CALL_ID':1, 'MON$STATEMENT_ID':stmt.id-1, 'MON$CALLER_ID':None, + 'MON$OBJECT_NAME':'POST_NEW_ORDER', 'MON$OBJECT_TYPE':2, 'MON$TIMESTAMP':datetime.datetime.now(), + 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':stmt.stat_id+100})) stack.append(fdb.monitor.CallStackInfo(m, - {'MON$CALL_ID':2, 'MON$STATEMENT_ID':stmt.id, 'MON$CALLER_ID':None, - 'MON$OBJECT_NAME':'POST_NEW_ORDER', 'MON$OBJECT_TYPE':2, 'MON$TIMESTAMP':datetime.datetime.now(), - 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':stmt.stat_id+101})) + {'MON$CALL_ID':2, 'MON$STATEMENT_ID':stmt.id, 'MON$CALLER_ID':None, + 'MON$OBJECT_NAME':'POST_NEW_ORDER', 'MON$OBJECT_TYPE':2, 'MON$TIMESTAMP':datetime.datetime.now(), + 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':stmt.stat_id+101})) stack.append(fdb.monitor.CallStackInfo(m, - {'MON$CALL_ID':3, 'MON$STATEMENT_ID':stmt.id, 'MON$CALLER_ID':2, - 'MON$OBJECT_NAME':'SHIP_ORDER', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), - 'MON$SOURCE_LINE':2, 'MON$SOURCE_COLUMN':2, 'MON$STAT_ID':stmt.stat_id+102})) + {'MON$CALL_ID':3, 'MON$STATEMENT_ID':stmt.id, 'MON$CALLER_ID':2, + 'MON$OBJECT_NAME':'SHIP_ORDER', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), + 'MON$SOURCE_LINE':2, 'MON$SOURCE_COLUMN':2, 'MON$STAT_ID':stmt.stat_id+102})) stack.append(fdb.monitor.CallStackInfo(m, - {'MON$CALL_ID':4, 'MON$STATEMENT_ID':stmt.id, 'MON$CALLER_ID':3, - 'MON$OBJECT_NAME':'SUB_TOT_BUDGET', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), - 'MON$SOURCE_LINE':3, 'MON$SOURCE_COLUMN':3, 'MON$STAT_ID':stmt.stat_id+103})) + {'MON$CALL_ID':4, 'MON$STATEMENT_ID':stmt.id, 'MON$CALLER_ID':3, + 'MON$OBJECT_NAME':'SUB_TOT_BUDGET', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), + 'MON$SOURCE_LINE':3, 'MON$SOURCE_COLUMN':3, 'MON$STAT_ID':stmt.stat_id+103})) stack.append(fdb.monitor.CallStackInfo(m, - {'MON$CALL_ID':5, 'MON$STATEMENT_ID':stmt.id+1, 'MON$CALLER_ID':None, - 'MON$OBJECT_NAME':'SUB_TOT_BUDGET', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), - 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':stmt.stat_id+104})) + {'MON$CALL_ID':5, 'MON$STATEMENT_ID':stmt.id+1, 'MON$CALLER_ID':None, + 'MON$OBJECT_NAME':'SUB_TOT_BUDGET', 'MON$OBJECT_TYPE':5, 'MON$TIMESTAMP':datetime.datetime.now(), + 'MON$SOURCE_LINE':1, 'MON$SOURCE_COLUMN':1, 'MON$STAT_ID':stmt.stat_id+104})) m.__dict__['_Monitor__callstack'] = stack data = m.iostats[0]._attributes data['MON$STAT_ID'] = stmt.stat_id+101 data['MON$STAT_GROUP'] = fdb.monitor.STAT_CALL - m.__dict__['_Monitor__iostats'].append(fdb.monitor.IOStatsInfo(m,data)) + m.__dict__['_Monitor__iostats'] = utils.ObjectList(m.iostats) + m.__dict__['_Monitor__iostats'].append(fdb.monitor.IOStatsInfo(m, data)) # s = m.get_call(2) # - self.assertEqual(s.id,2) - self.assertIs(s.statement,m.get_statement(stmt.id)) + self.assertEqual(s.id, 2) + self.assertIs(s.statement, m.get_statement(stmt.id)) self.assertIsNone(s.caller) - 
self.assertIsInstance(s.dbobject,sm.Trigger) - self.assertEqual(s.dbobject.name,'POST_NEW_ORDER') - self.assertIsInstance(s.timestamp,datetime.datetime) - self.assertEqual(s.line,1) - self.assertEqual(s.column,1) - self.assertEqual(s.iostats.group,fdb.monitor.STAT_CALL) - self.assertEqual(s.iostats.stat_id,s.stat_id) + self.assertIsInstance(s.dbobject, sm.Trigger) + self.assertEqual(s.dbobject.name, 'POST_NEW_ORDER') + self.assertIsInstance(s.timestamp, datetime.datetime) + self.assertEqual(s.line, 1) + self.assertEqual(s.column, 1) + self.assertEqual(s.iostats.group, fdb.monitor.STAT_CALL) + self.assertEqual(s.iostats.stat_id, s.stat_id) # x = m.get_call(3) - self.assertIs(x.caller,s) - self.assertIsInstance(x.dbobject,sm.Procedure) - self.assertEqual(x.dbobject.name,'SHIP_ORDER') + self.assertIs(x.caller, s) + self.assertIsInstance(x.dbobject, sm.Procedure) + self.assertEqual(x.dbobject.name, 'SHIP_ORDER') def testIOStatsInfo(self): if self.con.ods < fdb.ODS_FB_21: return @@ -5660,30 +6193,30 @@ m.refresh() # for io in m.iostats: - self.assertIs(io,io.owner.iostats) + self.assertIs(io, io.owner.iostats) # s = m.iostats[0] - self.assertIsInstance(s.owner,fdb.monitor.DatabaseInfo) - self.assertEqual(s.group,fdb.monitor.STAT_DATABASE) - self.assertIsInstance(s.reads,int) - self.assertIsInstance(s.writes,int) - self.assertIsInstance(s.fetches,int) - self.assertIsInstance(s.marks,int) - self.assertIsInstance(s.seq_reads,int) - self.assertIsInstance(s.idx_reads,int) - self.assertIsInstance(s.inserts,int) - self.assertIsInstance(s.updates,int) - self.assertIsInstance(s.deletes,int) - self.assertIsInstance(s.backouts,int) - self.assertIsInstance(s.purges,int) - self.assertIsInstance(s.expunges,int) + self.assertIsInstance(s.owner, fdb.monitor.DatabaseInfo) + self.assertEqual(s.group, fdb.monitor.STAT_DATABASE) + self.assertIsInstance(s.reads, int) + self.assertIsInstance(s.writes, int) + self.assertIsInstance(s.fetches, int) + self.assertIsInstance(s.marks, int) + self.assertIsInstance(s.seq_reads, int) + self.assertIsInstance(s.idx_reads, int) + self.assertIsInstance(s.inserts, int) + self.assertIsInstance(s.updates, int) + self.assertIsInstance(s.deletes, int) + self.assertIsInstance(s.backouts, int) + self.assertIsInstance(s.purges, int) + self.assertIsInstance(s.expunges, int) if self.con.ods >= fdb.ODS_FB_30: - self.assertIsInstance(s.locks,int) - self.assertIsInstance(s.waits,int) - self.assertIsInstance(s.conflits,int) - self.assertIsInstance(s.backversion_reads,int) - self.assertIsInstance(s.fragment_reads,int) - self.assertIsInstance(s.repeated_reads,int) + self.assertIsInstance(s.locks, int) + self.assertIsInstance(s.waits, int) + self.assertIsInstance(s.conflits, int) + self.assertIsInstance(s.backversion_reads, int) + self.assertIsInstance(s.fragment_reads, int) + self.assertIsInstance(s.repeated_reads, int) else: self.assertIsNone(s.locks) self.assertIsNone(s.waits) @@ -5692,10 +6225,10 @@ self.assertIsNone(s.fragment_reads) self.assertIsNone(s.repeated_reads) if self.con.ods >= fdb.ODS_FB_25: - self.assertIsInstance(s.memory_used,int) - self.assertIsInstance(s.memory_allocated,int) - self.assertIsInstance(s.max_memory_used,int) - self.assertIsInstance(s.max_memory_allocated,int) + self.assertIsInstance(s.memory_used, int) + self.assertIsInstance(s.memory_allocated, int) + self.assertIsInstance(s.max_memory_used, int) + self.assertIsInstance(s.max_memory_allocated, int) else: self.assertIsNone(s.memory_used) self.assertIsNone(s.memory_allocated) @@ -5715,61 +6248,106 @@ m = 
self.con.monitor m.refresh() # - self.assertEqual(len(m.variables),2) + self.assertEqual(len(m.variables), 2) # s = m.variables[0] - self.assertIs(s.attachment,m.this_attachment) + self.assertIs(s.attachment, m.this_attachment) self.assertIsNone(s.transaction) - self.assertEqual(s.name,'SVAR') - self.assertEqual(s.value,'TEST_VALUE') + self.assertEqual(s.name, 'SVAR') + self.assertEqual(s.value, 'TEST_VALUE') self.assertTrue(s.isattachmentvar()) self.assertFalse(s.istransactionvar()) # s = m.variables[1] self.assertIsNone(s.attachment) self.assertIs(s.transaction, - m.get_transaction(c.transaction.trans_info(fdb.isc_info_tra_id))) - self.assertEqual(s.name,'TVAR') - self.assertEqual(s.value,'TEST_VALUE') + m.get_transaction(c.transaction.trans_info(fdb.isc_info_tra_id))) + self.assertEqual(s.name, 'TVAR') + self.assertEqual(s.value, 'TEST_VALUE') self.assertFalse(s.isattachmentvar()) self.assertTrue(s.istransactionvar()) class TestConnectionWithSchema(FDBTestBase): def setUp(self): - super(TestConnectionWithSchema,self).setUp() - self.cwd = os.getcwd() - self.dbpath = os.path.join(self.cwd,'test') - self.dbfile = os.path.join(self.dbpath,self.FBTEST_DB) + super(TestConnectionWithSchema, self).setUp() + self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB) #self.con = fdb.connect(dsn=self.dbfile,user=FBTEST_USER,password=FBTEST_PASSWORD) def tearDown(self): #self.con.close() pass def testConnectSchema(self): - s = fdb.connect(host=FBTEST_HOST,database=self.dbfile,user=FBTEST_USER, + s = fdb.connect(host=FBTEST_HOST, database=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD, connection_class=fdb.ConnectionWithSchema) if s.ods < fdb.ODS_FB_30: - self.assertEqual(len(s.tables),15) + self.assertEqual(len(s.tables), 15) else: - self.assertEqual(len(s.tables),16) - self.assertEqual(s.get_table('JOB').name,'JOB') + self.assertEqual(len(s.tables), 16) + self.assertEqual(s.get_table('JOB').name, 'JOB') + + +class TestHooks(FDBTestBase): + def setUp(self): + super(TestHooks, self).setUp() + self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB) + def __hook_service_attached(self, con): + self._svc = con + return con + def __hook_db_attached(self, con): + self._db = con + return con + def __hook_db_attach_request_a(self, dsn, dpb): + return None + def __hook_db_attach_request_b(self, dsn, dpb): + return self._hook_con + def test_hook_db_attached(self): + fdb.add_hook(fdb.HOOK_DATABASE_ATTACHED, + self.__hook_db_attached) + with fdb.connect(dsn=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) as con: + self.assertEqual(con, self._db) + fdb.remove_hook(fdb.HOOK_DATABASE_ATTACHED, + self.__hook_db_attached) + def test_hook_db_attach_request(self): + self._hook_con = fdb.connect(dsn=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) + fdb.add_hook(fdb.HOOK_DATABASE_ATTACH_REQUEST, + self.__hook_db_attach_request_a) + fdb.add_hook(fdb.HOOK_DATABASE_ATTACH_REQUEST, + self.__hook_db_attach_request_b) + self.assertListEqual([self.__hook_db_attach_request_a, + self.__hook_db_attach_request_b], + fdb.get_hooks(fdb.HOOK_DATABASE_ATTACH_REQUEST)) + with fdb.connect(dsn=self.dbfile, user=FBTEST_USER, password=FBTEST_PASSWORD) as con: + self.assertEqual(con, self._hook_con) + self._hook_con.close() + fdb.remove_hook(fdb.HOOK_DATABASE_ATTACH_REQUEST, + self.__hook_db_attach_request_a) + fdb.remove_hook(fdb.HOOK_DATABASE_ATTACH_REQUEST, + self.__hook_db_attach_request_b) + def test_hook_service_attached(self): + fdb.add_hook(fdb.HOOK_SERVICE_ATTACHED, + self.__hook_service_attached) + svc = 
fdb.services.connect(host=FBTEST_HOST, password=FBTEST_PASSWORD) + self.assertEqual(svc, self._svc) + svc.close() + fdb.remove_hook(fdb.HOOK_SERVICE_ATTACHED, + self.__hook_service_attached) class TestBugs(FDBTestBase): + "Tests for bugs logged in tracker, URL pattern: http://tracker.firebirdsql.org/browse/PYFB-" def setUp(self): - super(TestBugs,self).setUp() - self.cwd = os.getcwd() - self.dbpath = os.path.join(self.cwd,'test') - self.dbfile = os.path.join(self.dbpath,'fbbugs.fdb') + super(TestBugs, self).setUp() + self.dbfile = os.path.join(self.dbpath, 'fbbugs.fdb') if os.path.exists(self.dbfile): os.remove(self.dbfile) - self.con = fdb.create_database(host=FBTEST_HOST,database=self.dbfile, - user=FBTEST_USER,password=FBTEST_PASSWORD) + self.con = fdb.create_database(host=FBTEST_HOST, database=self.dbfile, + user=FBTEST_USER, password=FBTEST_PASSWORD) def tearDown(self): self.con.drop_database() self.con.close() def test_pyfb_17(self): + "(PYFB-17) NOT NULL constraint + Insert Trigger" create_table = """ Create Table table1 ( ID Integer, @@ -5795,6 +6373,7 @@ # PYFB-17: fails with fdb, passes with kinterbasdb cur.execute('insert into table1 (ID, C1) values(1, ?)', (None, )) def test_pyfb_22(self): + "(PYFB-22) SELECT FROM VARCHAR COLUMN WITH TEXT LONGER THAN 128 CHARS RETURN EMPTY STRING" create_table = """ CREATE TABLE FDBTEST ( ID INTEGER, @@ -5819,10 +6398,11 @@ i = 0 for row in cur: value = row[0] - self.assertEqual(len(value),i) - self.assertEqual(value,data[:i]) + self.assertEqual(len(value), i) + self.assertEqual(value, data[:i]) i += 1 def test_pyfb_25(self): + "(PYFB-25) Truncate long text from VARCHAR(5000)" create_table = """ CREATE TABLE FDBTEST2 ( ID INTEGER, @@ -5840,8 +6420,9 @@ # PYFB-25: fails with fdb, passes with kinterbasdb cur.execute("select test5000 from fdbtest2") row = cur.fetchone() - self.assertEqual(row[0],data) + self.assertEqual(row[0], data) def test_pyfb_30(self): + "(PYFB-30) BLOBs are truncated at first zero byte" create_table = """ CREATE TABLE FDBTEST3 ( ID INTEGER, @@ -5852,7 +6433,7 @@ cur.execute(create_table) self.con.commit() # test data - data_bytes = (1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9) + data_bytes = (1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9) blob_data = fdb.bs(data_bytes) cur.execute("insert into fdbtest3 (id, t_blob) values (?, ?)", (1, blob_data)) @@ -5862,22 +6443,24 @@ # PYFB-XX: binary blob truncated at zero-byte cur.execute("select t_blob from fdbtest3 where id = 1") row = cur.fetchone() - self.assertEqual(row[0],blob_data) + self.assertEqual(row[0], blob_data) cur.execute("select t_blob from fdbtest3 where id = 2") row = cur.fetchone() - self.assertEqual(row[0],blob_data) + self.assertEqual(row[0], blob_data) p = cur.prep("select t_blob from fdbtest3 where id = 2") p.set_stream_blob('T_BLOB') cur.execute(p) blob_reader = cur.fetchone()[0] value = blob_reader.read() - self.assertEqual(value,blob_data) + self.assertEqual(value, blob_data) def test_pyfb_34(self): + "(PYFB-34) Server resources not released on PreparedStatement destruction" cur = self.con.cursor() - cur.execute( "select * from RDB$Relations") + cur.execute("select * from RDB$Relations") cur.fetchall() del cur def test_pyfb_35(self): + "(PYFB-35) Call to fetch after an SQL statement without a result should raise exception" create_table = """ Create Table table1 ( ID Integer, @@ -5894,7 +6477,7 @@ with self.assertRaises(fdb.DatabaseError) as cm: cur.fetchall() self.assertTupleEqual(cm.exception.args, - ("Cannot fetch from this cursor because it has not 
executed a statement.",)) + ("Cannot fetch from this cursor because it has not executed a statement.",)) cur.execute("select * from RDB$DATABASE") cur.fetchall() @@ -5902,30 +6485,4486 @@ with self.assertRaises(fdb.DatabaseError) as cm: cur.fetchall() self.assertTupleEqual(cm.exception.args, - ("Attempt to fetch row of results after statement that does not produce result set.",)) + ("Attempt to fetch row of results after statement that does not produce result set.",)) cur.execute("insert into table1 (ID,C1) values (1,1) returning ID") row = cur.fetchall() - self.assertListEqual(row,[(1,)]) + self.assertListEqual(row, [(1,)]) def test_pyfb_44(self): - self.con2 = fdb.connect(host=FBTEST_HOST,database=os.path.join(self.dbpath,self.FBTEST_DB), - user=FBTEST_USER,password=FBTEST_PASSWORD) + "(PYFB-44) Inserting a datetime.date into a TIMESTAMP column does not work" + self.con2 = fdb.connect(host=FBTEST_HOST, database=os.path.join(self.dbpath, self.FBTEST_DB), + user=FBTEST_USER, password=FBTEST_PASSWORD) try: cur = self.con2.cursor() - now = datetime.datetime(2011,11,13,15,00,1,200) - cur.execute('insert into T2 (C1,C8) values (?,?)',[3,now.date()]) + now = datetime.datetime(2011, 11, 13, 15, 00, 1, 200) + cur.execute('insert into T2 (C1,C8) values (?,?)', [3, now.date()]) self.con2.commit() cur.execute('select C1,C8 from T2 where C1 = 3') rows = cur.fetchall() self.assertListEqual(rows, - [(3, datetime.datetime(2011, 11, 13, 0, 0, 0, 0))]) + [(3, datetime.datetime(2011, 11, 13, 0, 0, 0, 0))]) finally: self.con2.execute_immediate("delete from t2") self.con2.commit() self.con2.close() + +class TestTraceParse(FDBTestBase): + def setUp(self): + super(TestTraceParse, self).setUp() + self.dbfile = os.path.join(self.dbpath, self.FBTEST_DB) + def test_linesplit_iter(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +""" + for line in linesplit_iter(trace_lines): + self.output.write(line + '\n') + self.assertEqual(self.output.getvalue(), trace_lines) + def _check_events(self, trace_lines, output): + parser = fdb.trace.TraceParser() + for obj in parser.parse(linesplit_iter(trace_lines)): + self.printout(str(obj)) + self.assertEqual(self.output.getvalue(), output, "Parsed events do not match expected ones") + def test_trace_init(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) TRACE_INIT + SESSION_1 + +""" + output = "EventTraceInit(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), session_name='SESSION_1')\n" + self._check_events(trace_lines, output) + def test_trace_suspend(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) TRACE_INIT + SESSION_1 + +--- Session 1 is suspended as its log is full --- +2014-05-23T12:01:01.1420 (3720:0000000000EFD9E8) TRACE_INIT + SESSION_1 + +""" + output = """EventTraceInit(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), session_name='SESSION_1') +EventTraceSuspend(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), session_name='SESSION_1') +EventTraceInit(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 12, 1, 1, 142000), session_name='SESSION_1') +""" + self._check_events(trace_lines, output) + def test_trace_finish(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) TRACE_INIT + SESSION_1 + +2014-05-23T11:01:24.8080 (3720:0000000000EFD9E8) TRACE_FINI + SESSION_1 + +""" + output = 
"""EventTraceInit(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), session_name='SESSION_1') +EventTraceFinish(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 1, 24, 808000), session_name='SESSION_1') +""" + self._check_events(trace_lines, output) + def test_create_database(self): + trace_lines = """2018-03-29T14:20:55.1180 (6290:0x7f9bb00bb978) CREATE_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +""" + output = """EventCreate(event_id=1, timestamp=datetime.datetime(2018, 3, 29, 14, 20, 55, 118000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +""" + self._check_events(trace_lines, output) + def test_drop_database(self): + trace_lines = """2018-03-29T14:20:55.1180 (6290:0x7f9bb00bb978) DROP_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +""" + output = """EventDrop(event_id=1, timestamp=datetime.datetime(2018, 3, 29, 14, 20, 55, 118000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +""" + self._check_events(trace_lines, output) + def test_attach(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +""" + self._check_events(trace_lines, output) + def test_attach_failed(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) FAILED ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status='F', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +""" + self._check_events(trace_lines, output) + def test_unauthorized_attach(self): + trace_lines = """2014-09-24T14:46:15.0350 (2453:0x7fed02a04910) UNAUTHORIZED ATTACH_DATABASE + /home/employee.fdb (ATT_0, sysdba, NONE, TCPv4:127.0.0.1) + /opt/firebird/bin/isql:8723 + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 9, 24, 14, 46, 15, 35000), status='U', attachment_id=0, database='/home/employee.fdb', charset='NONE', protocol='TCPv4', address='127.0.0.1', user='sysdba', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +""" + self._check_events(trace_lines, output) + def test_detach(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:01:24.8080 (3720:0000000000EFD9E8) DETACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +""" + output 
= """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventDetach(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 1, 24, 808000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +""" + self._check_events(trace_lines, output) + def test_detach_without_attach(self): + trace_lines = """2014-05-23T11:01:24.8080 (3720:0000000000EFD9E8) DETACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +""" + output = """EventDetach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 1, 24, 808000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +""" + self._check_events(trace_lines, output) + def test_start_transaction(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +""" + self._check_events(trace_lines, output) + def test_start_transaction_without_attachment(self): + trace_lines = """2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +""" + output = """AttachmentInfo(attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +""" + self._check_events(trace_lines, output) + def test_commit(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:29.9570 (3720:0000000000EFD9E8) 
COMMIT_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + 0 ms, 1 read(s), 1 write(s), 1 fetch(es), 1 mark(s) + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +EventCommit(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=0, reads=1, writes=1, fetches=1, marks=1) +""" + self._check_events(trace_lines, output) + def test_commit_no_performance(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:29.9570 (3720:0000000000EFD9E8) COMMIT_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +EventCommit(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=None, reads=None, writes=None, fetches=None, marks=None) +""" + self._check_events(trace_lines, output) + def test_commit_without_attachment_and_start(self): + trace_lines = """2014-05-23T11:00:29.9570 (3720:0000000000EFD9E8) COMMIT_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + 0 ms, 1 read(s), 1 write(s), 1 fetch(es), 1 mark(s) + +""" + output = """AttachmentInfo(attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventCommit(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=0, reads=1, writes=1, fetches=1, marks=1) +""" + self._check_events(trace_lines, output) + def test_rollback(self): + trace_lines = """2014-05-23T11:00:28.5840 
(3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:29.9570 (3720:0000000000EFD9E8) ROLLBACK_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) +0 ms + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +EventRollback(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=0, reads=None, writes=None, fetches=None, marks=None) +""" + self._check_events(trace_lines, output) + def test_rollback_no_performance(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:29.9570 (3720:0000000000EFD9E8) ROLLBACK_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +EventRollback(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=None, reads=None, writes=None, fetches=None, marks=None) +""" + self._check_events(trace_lines, output) + def test_rollback_attachment_and_start(self): + trace_lines = """2014-05-23T11:00:29.9570 (3720:0000000000EFD9E8) ROLLBACK_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) +0 ms + +""" + output = """AttachmentInfo(attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) 
+EventRollback(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=0, reads=None, writes=None, fetches=None, marks=None) +""" + self._check_events(trace_lines, output) + def test_commit_retaining(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:29.9570 (3720:0000000000EFD9E8) COMMIT_RETAINING + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + 0 ms, 1 read(s), 1 write(s), 1 fetch(es), 1 mark(s) + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +EventCommitRetaining(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=0, reads=1, writes=1, fetches=1, marks=1) +""" + self._check_events(trace_lines, output) + def test_commit_retaining_no_performance(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:29.9570 (3720:0000000000EFD9E8) COMMIT_RETAINING + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +EventCommitRetaining(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=None, reads=None, writes=None, fetches=None, marks=None) +""" + self._check_events(trace_lines, output) + def test_commit_retaining_without_attachment_and_start(self): + trace_lines = """2014-05-23T11:00:29.9570 
(3720:0000000000EFD9E8) COMMIT_RETAINING + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + 0 ms, 1 read(s), 1 write(s), 1 fetch(es), 1 mark(s) + +""" + output = """AttachmentInfo(attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventCommitRetaining(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=0, reads=1, writes=1, fetches=1, marks=1) +""" + self._check_events(trace_lines, output) + def test_rollback_retaining(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:29.9570 (3720:0000000000EFD9E8) ROLLBACK_RETAINING + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) +0 ms + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +EventRollbackRetaining(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=0, reads=None, writes=None, fetches=None, marks=None) +""" + self._check_events(trace_lines, output) + def test_rollback_retaining_no_performance(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:29.9570 (3720:0000000000EFD9E8) ROLLBACK_RETAINING + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 
'WAIT', 'READ_WRITE']) +EventRollbackRetaining(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=None, reads=None, writes=None, fetches=None, marks=None) +""" + self._check_events(trace_lines, output) + def test_rollback_retaining_without_attachment_and_start(self): + trace_lines = """2014-05-23T11:00:29.9570 (3720:0000000000EFD9E8) ROLLBACK_RETAINING + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1568, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) +0 ms + +""" + output = """AttachmentInfo(attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventRollbackRetaining(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 29, 957000), status=' ', attachment_id=8, transaction_id=1568, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE'], run_time=0, reads=None, writes=None, fetches=None, marks=None) +""" + self._check_events(trace_lines, output) + def test_prepare_statement(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) PREPARE_STATEMENT + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 181: +------------------------------------------------------------------------------- +SELECT GEN_ID(GEN_NUM, 1) FROM RDB$DATABASE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (RDB$DATABASE NATURAL) + 13 ms + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='SELECT GEN_ID(GEN_NUM, 1) FROM RDB$DATABASE', plan='PLAN (RDB$DATABASE NATURAL)') +EventPrepareStatement(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, statement_id=181, sql_id=1, prepare_time=13) +""" + self._check_events(trace_lines, output) + def test_prepare_statement_no_plan(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5260 
(3720:0000000000EFD9E8) PREPARE_STATEMENT + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 181: +------------------------------------------------------------------------------- +SELECT GEN_ID(GEN_NUM, 1) FROM RDB$DATABASE + 13 ms + +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='SELECT GEN_ID(GEN_NUM, 1) FROM RDB$DATABASE', plan=None) +EventPrepareStatement(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, statement_id=181, sql_id=1, prepare_time=13) +""" + self._check_events(trace_lines, output) + def test_prepare_statement_no_attachment(self): + trace_lines = """2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) PREPARE_STATEMENT + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 181: +------------------------------------------------------------------------------- +SELECT GEN_ID(GEN_NUM, 1) FROM RDB$DATABASE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (RDB$DATABASE NATURAL) + 13 ms + +""" + output = """AttachmentInfo(attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='SELECT GEN_ID(GEN_NUM, 1) FROM RDB$DATABASE', plan='PLAN (RDB$DATABASE NATURAL)') +EventPrepareStatement(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, statement_id=181, sql_id=1, prepare_time=13) +""" + self._check_events(trace_lines, output) + def test_prepare_statement_no_transaction(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) PREPARE_STATEMENT + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 181: +------------------------------------------------------------------------------- +SELECT GEN_ID(GEN_NUM, 1) FROM RDB$DATABASE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (RDB$DATABASE NATURAL) + 13 ms + +""" + output = 
"""EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +TransactionInfo(attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='SELECT GEN_ID(GEN_NUM, 1) FROM RDB$DATABASE', plan='PLAN (RDB$DATABASE NATURAL)') +EventPrepareStatement(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, statement_id=181, sql_id=1, prepare_time=13) +""" + self._check_events(trace_lines, output) + def test_prepare_statement_no_attachment_no_transaction(self): + trace_lines = """2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) PREPARE_STATEMENT + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 181: +------------------------------------------------------------------------------- +SELECT GEN_ID(GEN_NUM, 1) FROM RDB$DATABASE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (RDB$DATABASE NATURAL) + 13 ms + +""" + output = """AttachmentInfo(attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +TransactionInfo(attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='SELECT GEN_ID(GEN_NUM, 1) FROM RDB$DATABASE', plan='PLAN (RDB$DATABASE NATURAL)') +EventPrepareStatement(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, statement_id=181, sql_id=1, prepare_time=13) +""" + self._check_events(trace_lines, output) + def test_statement_start(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) EXECUTE_STATEMENT_START + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 166353: +------------------------------------------------------------------------------- +UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? WHERE ID_EX=? 
+ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (TABLE_A INDEX (TABLE_A_PK)) + +param0 = timestamp, "2017-11-09T11:23:52.1570" +param1 = integer, "100012829" +param2 = integer, "" +param3 = varchar(20), "2810090906551" +param4 = integer, "4199300" +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +ParamInfo(par_id=1, params=[('timestamp', datetime.datetime(2017, 11, 9, 11, 23, 52, 157000)), ('integer', 100012829), ('integer', None), ('varchar(20)', '2810090906551'), ('integer', 4199300)]) +SQLInfo(sql_id=1, sql='UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? WHERE ID_EX=?', plan='PLAN (TABLE_A INDEX (TABLE_A_PK))') +EventStatementStart(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, statement_id=166353, sql_id=1, param_id=1) +""" + self._check_events(trace_lines, output) + def test_statement_start_no_plan(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) EXECUTE_STATEMENT_START + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 166353: +------------------------------------------------------------------------------- +UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? WHERE ID_EX=? +param0 = timestamp, "2017-11-09T11:23:52.1570" +param1 = integer, "100012829" +param2 = integer, "" +param3 = varchar(20), "2810090906551" +param4 = integer, "4199300" +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +ParamInfo(par_id=1, params=[('timestamp', datetime.datetime(2017, 11, 9, 11, 23, 52, 157000)), ('integer', 100012829), ('integer', None), ('varchar(20)', '2810090906551'), ('integer', 4199300)]) +SQLInfo(sql_id=1, sql='UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? 
WHERE ID_EX=?', plan=None) +EventStatementStart(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, statement_id=166353, sql_id=1, param_id=1) +""" + self._check_events(trace_lines, output) + def test_statement_start_no_attachment(self): + trace_lines = """2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) EXECUTE_STATEMENT_START + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 166353: +------------------------------------------------------------------------------- +UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? WHERE ID_EX=? + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (TABLE_A INDEX (TABLE_A_PK)) + +param0 = timestamp, "2017-11-09T11:23:52.1570" +param1 = integer, "100012829" +param2 = integer, "" +param3 = varchar(20), "2810090906551" +param4 = integer, "4199300" +""" + output = """AttachmentInfo(attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +ParamInfo(par_id=1, params=[('timestamp', datetime.datetime(2017, 11, 9, 11, 23, 52, 157000)), ('integer', 100012829), ('integer', None), ('varchar(20)', '2810090906551'), ('integer', 4199300)]) +SQLInfo(sql_id=1, sql='UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? WHERE ID_EX=?', plan='PLAN (TABLE_A INDEX (TABLE_A_PK))') +EventStatementStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, statement_id=166353, sql_id=1, param_id=1) +""" + self._check_events(trace_lines, output) + def test_statement_start_no_transaction(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) EXECUTE_STATEMENT_START + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 166353: +------------------------------------------------------------------------------- +UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? WHERE ID_EX=? 
+ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (TABLE_A INDEX (TABLE_A_PK)) + +param0 = timestamp, "2017-11-09T11:23:52.1570" +param1 = integer, "100012829" +param2 = integer, "" +param3 = varchar(20), "2810090906551" +param4 = integer, "4199300" +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +TransactionInfo(attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +ParamInfo(par_id=1, params=[('timestamp', datetime.datetime(2017, 11, 9, 11, 23, 52, 157000)), ('integer', 100012829), ('integer', None), ('varchar(20)', '2810090906551'), ('integer', 4199300)]) +SQLInfo(sql_id=1, sql='UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? WHERE ID_EX=?', plan='PLAN (TABLE_A INDEX (TABLE_A_PK))') +EventStatementStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, statement_id=166353, sql_id=1, param_id=1) +""" + self._check_events(trace_lines, output) + def test_statement_start_no_attachment_no_transaction(self): + trace_lines = """2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) EXECUTE_STATEMENT_START + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 166353: +------------------------------------------------------------------------------- +UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? WHERE ID_EX=? + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (TABLE_A INDEX (TABLE_A_PK)) + +param0 = timestamp, "2017-11-09T11:23:52.1570" +param1 = integer, "100012829" +param2 = integer, "" +param3 = varchar(20), "2810090906551" +param4 = integer, "4199300" +""" + output = """AttachmentInfo(attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +TransactionInfo(attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +ParamInfo(par_id=1, params=[('timestamp', datetime.datetime(2017, 11, 9, 11, 23, 52, 157000)), ('integer', 100012829), ('integer', None), ('varchar(20)', '2810090906551'), ('integer', 4199300)]) +SQLInfo(sql_id=1, sql='UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? 
WHERE ID_EX=?', plan='PLAN (TABLE_A INDEX (TABLE_A_PK))') +EventStatementStart(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, statement_id=166353, sql_id=1, param_id=1) +""" + self._check_events(trace_lines, output) + def test_statement_finish(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5420 (3720:0000000000EFD9E8) EXECUTE_STATEMENT_FINISH + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 181: +------------------------------------------------------------------------------- +SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (RDB$DATABASE NATURAL) +1 records fetched + 0 ms, 2 read(s), 14 fetch(es), 1 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$DATABASE 1 +RDB$CHARACTER_SETS 1 +RDB$COLLATIONS 1 +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE', plan='PLAN (RDB$DATABASE NATURAL)') +EventStatementFinish(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 542000), status=' ', attachment_id=8, transaction_id=1570, statement_id=181, sql_id=1, param_id=None, records=1, run_time=0, reads=2, writes=None, fetches=14, marks=1, access=[AccessTuple(table='RDB$DATABASE', natural=1, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$CHARACTER_SETS', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$COLLATIONS', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +""" + self._check_events(trace_lines, output) + def test_statement_finish_no_plan(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5420 (3720:0000000000EFD9E8) EXECUTE_STATEMENT_FINISH + /home/employee.fdb (ATT_8, EUROFLOW:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + 
+Statement 181: +------------------------------------------------------------------------------- +SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE +1 records fetched + 0 ms, 2 read(s), 14 fetch(es), 1 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$DATABASE 1 +RDB$CHARACTER_SETS 1 +RDB$COLLATIONS 1 +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE', plan=None) +EventStatementFinish(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 542000), status=' ', attachment_id=8, transaction_id=1570, statement_id=181, sql_id=1, param_id=None, records=1, run_time=0, reads=2, writes=None, fetches=14, marks=1, access=[AccessTuple(table='RDB$DATABASE', natural=1, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$CHARACTER_SETS', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$COLLATIONS', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +""" + self._check_events(trace_lines, output) + def test_statement_finish_no_attachment(self): + trace_lines = """2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5420 (3720:0000000000EFD9E8) EXECUTE_STATEMENT_FINISH + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 181: +------------------------------------------------------------------------------- +SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (RDB$DATABASE NATURAL) +1 records fetched + 0 ms, 2 read(s), 14 fetch(es), 1 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$DATABASE 1 +RDB$CHARACTER_SETS 1 +RDB$COLLATIONS 1 +""" + output = """AttachmentInfo(attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE', plan='PLAN (RDB$DATABASE NATURAL)') +EventStatementFinish(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 542000), status=' ', attachment_id=8, transaction_id=1570, statement_id=181, sql_id=1, param_id=None, 
records=1, run_time=0, reads=2, writes=None, fetches=14, marks=1, access=[AccessTuple(table='RDB$DATABASE', natural=1, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$CHARACTER_SETS', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$COLLATIONS', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +""" + self._check_events(trace_lines, output) + def test_statement_finish_no_transaction(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:45.5420 (3720:0000000000EFD9E8) EXECUTE_STATEMENT_FINISH + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 181: +------------------------------------------------------------------------------- +SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (RDB$DATABASE NATURAL) +1 records fetched + 0 ms, 2 read(s), 14 fetch(es), 1 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$DATABASE 1 +RDB$CHARACTER_SETS 1 +RDB$COLLATIONS 1 +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +TransactionInfo(attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE', plan='PLAN (RDB$DATABASE NATURAL)') +EventStatementFinish(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 542000), status=' ', attachment_id=8, transaction_id=1570, statement_id=181, sql_id=1, param_id=None, records=1, run_time=0, reads=2, writes=None, fetches=14, marks=1, access=[AccessTuple(table='RDB$DATABASE', natural=1, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$CHARACTER_SETS', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$COLLATIONS', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +""" + self._check_events(trace_lines, output) + def test_statement_finish_no_attachment_no_transaction(self): + trace_lines = """2014-05-23T11:00:45.5420 (3720:0000000000EFD9E8) EXECUTE_STATEMENT_FINISH + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 181: +------------------------------------------------------------------------------- +SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (RDB$DATABASE NATURAL) +1 records fetched + 0 ms, 2 read(s), 14 fetch(es), 1 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$DATABASE 1 
+RDB$CHARACTER_SETS 1 +RDB$COLLATIONS 1 +""" + output = """AttachmentInfo(attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +TransactionInfo(attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE', plan='PLAN (RDB$DATABASE NATURAL)') +EventStatementFinish(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 542000), status=' ', attachment_id=8, transaction_id=1570, statement_id=181, sql_id=1, param_id=None, records=1, run_time=0, reads=2, writes=None, fetches=14, marks=1, access=[AccessTuple(table='RDB$DATABASE', natural=1, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$CHARACTER_SETS', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$COLLATIONS', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +""" + self._check_events(trace_lines, output) + def test_statement_finish_no_performance(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5420 (3720:0000000000EFD9E8) EXECUTE_STATEMENT_FINISH + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 181: +------------------------------------------------------------------------------- +SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (RDB$DATABASE NATURAL) +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='SELECT GEN_ID(GEN_NUM, 1) NUMS FROM RDB$DATABASE', plan='PLAN (RDB$DATABASE NATURAL)') +EventStatementFinish(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 542000), status=' ', attachment_id=8, transaction_id=1570, statement_id=181, sql_id=1, param_id=None, records=None, run_time=None, reads=None, writes=None, fetches=None, marks=None, access=None) +""" + self._check_events(trace_lines, output) + def test_statement_free(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + 
+2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) FREE_STATEMENT + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 166353: +------------------------------------------------------------------------------- +UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? WHERE ID_EX=? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (TABLE_A INDEX (TABLE_A_PK)) +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? WHERE ID_EX=?', plan='PLAN (TABLE_A INDEX (TABLE_A_PK))') +EventFreeStatement(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), attachment_id=8, transaction_id=1570, statement_id=166353, sql_id=1) +""" + self._check_events(trace_lines, output) + def test_close_cursor(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) CLOSE_CURSOR + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Statement 166353: +------------------------------------------------------------------------------- +UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? WHERE ID_EX=? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PLAN (TABLE_A INDEX (TABLE_A_PK)) +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +SQLInfo(sql_id=1, sql='UPDATE TABLE_A SET VAL_1=?, VAL_2=?, VAL_3=?, VAL_4=? 
WHERE ID_EX=?', plan='PLAN (TABLE_A INDEX (TABLE_A_PK))') +EventCloseCursor(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), attachment_id=8, transaction_id=1570, statement_id=166353, sql_id=1) +""" + self._check_events(trace_lines, output) + def test_trigger_start(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) EXECUTE_TRIGGER_START + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + BI_TABLE_A FOR TABLE_A (BEFORE INSERT) +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +EventTriggerStart(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, trigger='BI_TABLE_A', table='TABLE_A', event='BEFORE INSERT') +""" + self._check_events(trace_lines, output) + def test_trigger_finish(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) EXECUTE_TRIGGER_FINISH + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + AIU_TABLE_A FOR TABLE_A (AFTER INSERT) + 1118 ms, 681 read(s), 80 write(s), 1426 fetch(es), 80 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$DATABASE 1 +RDB$INDICES 107 +RDB$RELATIONS 10 +RDB$FORMATS 6 +RDB$RELATION_CONSTRAINTS 20 +TABLE_A 1 +TABLE_B 2 +TABLE_C 1 +TABLE_D 1 +TABLE_E 3 +TABLE_F 25 +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +EventTriggerFinish(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, 
transaction_id=1570, trigger='AIU_TABLE_A', table='TABLE_A', event='AFTER INSERT', run_time=1118, reads=681, writes=80, fetches=1426, marks=80, access=[AccessTuple(table='RDB$DATABASE', natural=1, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$INDICES', natural=0, index=107, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$RELATIONS', natural=0, index=10, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$FORMATS', natural=0, index=6, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='RDB$RELATION_CONSTRAINTS', natural=0, index=20, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='TABLE_A', natural=0, index=0, update=0, insert=1, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='TABLE_B', natural=0, index=2, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='TABLE_C', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='TABLE_D', natural=0, index=0, update=0, insert=1, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='TABLE_E', natural=0, index=3, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='TABLE_F', natural=0, index=25, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +""" + self._check_events(trace_lines, output) + def test_procedure_start(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5260 (3720:0000000000EFD9E8) EXECUTE_PROCEDURE_START + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Procedure PROC_A: +param0 = varchar(50), "758749" +param1 = varchar(10), "XXX" +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +ParamInfo(par_id=1, params=[('varchar(50)', '758749'), ('varchar(10)', 'XXX')]) +EventProcedureStart(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, procedure='PROC_A', param_id=1) +""" + self._check_events(trace_lines, output) + def test_procedure_finish(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2014-05-23T11:00:45.5260 
(3720:0000000000EFD9E8) EXECUTE_PROCEDURE_FINISH + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +Procedure PROC_A: +param0 = varchar(10), "XXX" +param1 = double precision, "313204" +param2 = double precision, "1" +param3 = varchar(20), "50031" +param4 = varchar(20), "GGG(1.25)" +param5 = varchar(10), "PP100X120" +param6 = varchar(20), "" +param7 = double precision, "3.33333333333333" +param8 = double precision, "45" +param9 = integer, "3" +param10 = integer, "" +param11 = double precision, "1" +param12 = integer, "0" + + 0 ms, 14 read(s), 14 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +TABLE_A 1 +TABLE_B 1 +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +ParamInfo(par_id=1, params=[('varchar(10)', 'XXX'), ('double precision', Decimal('313204')), ('double precision', Decimal('1')), ('varchar(20)', '50031'), ('varchar(20)', 'GGG(1.25)'), ('varchar(10)', 'PP100X120'), ('varchar(20)', None), ('double precision', Decimal('3.33333333333333')), ('double precision', Decimal('45')), ('integer', 3), ('integer', None), ('double precision', Decimal('1')), ('integer', 0)]) +EventProcedureFinish(event_id=3, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 45, 526000), status=' ', attachment_id=8, transaction_id=1570, procedure='PROC_A', param_id=1, run_time=0, reads=14, writes=None, fetches=14, marks=None, access=[AccessTuple(table='TABLE_A', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0), AccessTuple(table='TABLE_B', natural=0, index=1, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +""" + self._check_events(trace_lines, output) + def test_service_attach(self): + trace_lines = """2017-11-13T11:49:51.3110 (2500:0000000026C3C858) ATTACH_SERVICE + service_mgr, (Service 0000000019993DC0, SYSDBA, TCPv4:127.0.0.1, /job/fbtrace:385) +""" + output = """ServiceInfo(service_id=429473216, user='SYSDBA', protocol='TCPv4', address='127.0.0.1', remote_process='/job/fbtrace', remote_pid=385) +EventServiceAttach(event_id=1, timestamp=datetime.datetime(2017, 11, 13, 11, 49, 51, 311000), status=' ', service_id=429473216) +""" + self._check_events(trace_lines, output) + def test_service_detach(self): + trace_lines = """2017-11-13T22:50:09.3790 (2500:0000000026C39D70) DETACH_SERVICE + service_mgr, (Service 0000000028290058, SYSDBA, TCPv4:127.0.0.1, /job/fbtrace:385) +""" + output = """ServiceInfo(service_id=673775704, user='SYSDBA', protocol='TCPv4', address='127.0.0.1', remote_process='/job/fbtrace', remote_pid=385) +EventServiceDetach(event_id=1, timestamp=datetime.datetime(2017, 11, 13, 22, 50, 9, 379000), status=' ', service_id=673775704) +""" + self._check_events(trace_lines, output) + def test_service_start(self): + trace_lines = """2017-11-13T11:49:07.7860 (2500:0000000001A4DB68) START_SERVICE + service_mgr, (Service 000000001F6F1CF8, SYSDBA, 
TCPv4:127.0.0.1, /job/fbtrace:385) + "Start Trace Session" + -TRUSTED_SVC SYSDBA -START -CONFIG +enabled true +log_connections true +log_transactions true +log_statement_prepare false +log_statement_free false +log_statement_start false +log_statement_finish false +print_plan false +print_perf false +time_threshold 1000 +max_sql_length 300 +max_arg_length 80 +max_arg_count 30 +log_procedure_start false +log_procedure_finish false +log_trigger_start false +log_trigger_finish false +log_context false +log_errors false +log_sweep false +log_blr_requests false +print_blr false +max_blr_length 500 +log_dyn_requests false +print_dyn false +max_dyn_length 500 +log_warnings false +log_initfini false + + + +enabled true +log_services true +log_errors false +log_warnings false +log_initfini false + +""" + output = """ServiceInfo(service_id=527375608, user='SYSDBA', protocol='TCPv4', address='127.0.0.1', remote_process='/job/fbtrace', remote_pid=385) +EventServiceStart(event_id=1, timestamp=datetime.datetime(2017, 11, 13, 11, 49, 7, 786000), status=' ', service_id=527375608, action='Start Trace Session', parameters=['-TRUSTED_SVC SYSDBA -START -CONFIG ', 'enabled true', 'log_connections true', 'log_transactions true', 'log_statement_prepare false', 'log_statement_free false', 'log_statement_start false', 'log_statement_finish false', 'print_plan false', 'print_perf false', 'time_threshold 1000', 'max_sql_length 300', 'max_arg_length 80', 'max_arg_count 30', 'log_procedure_start false', 'log_procedure_finish false', 'log_trigger_start false', 'log_trigger_finish false', 'log_context false', 'log_errors false', 'log_sweep false', 'log_blr_requests false', 'print_blr false', 'max_blr_length 500', 'log_dyn_requests false', 'print_dyn false', 'max_dyn_length 500', 'log_warnings false', 'log_initfini false', '', '', 'enabled true', 'log_services true', 'log_errors false', 'log_warnings false', 'log_initfini false', '']) +""" + self._check_events(trace_lines, output) + def test_service_query(self): + trace_lines = """2018-03-29T14:02:10.9180 (5924:0x7feab93f4978) QUERY_SERVICE + service_mgr, (Service 0x7feabd3da548, SYSDBA, TCPv4:127.0.0.1, /job/fbtrace:385) + "Start Trace Session" + Receive portion of the query: + retrieve 1 line of service output per call + +2018-04-03T12:41:01.7970 (5831:0x7f748c054978) QUERY_SERVICE + service_mgr, (Service 0x7f748f839540, SYSDBA, TCPv4:127.0.0.1, /job/fbtrace:4631) + Receive portion of the query: + retrieve the version of the server engine + +2018-04-03T12:41:30.7840 (5831:0x7f748c054978) QUERY_SERVICE + service_mgr, (Service 0x7f748f839540, SYSDBA, TCPv4:127.0.0.1, /job/fbtrace:4631) + Receive portion of the query: + retrieve the implementation of the Firebird server + +2018-04-03T12:56:27.5590 (5831:0x7f748c054978) QUERY_SERVICE + service_mgr, (Service 0x7f748f839540, SYSDBA, TCPv4:127.0.0.1, /job/fbtrace:4631) + "Repair Database" +""" + output = """ServiceInfo(service_id=140646174008648, user='SYSDBA', protocol='TCPv4', address='127.0.0.1', remote_process='/job/fbtrace', remote_pid=385) +EventServiceQuery(event_id=1, timestamp=datetime.datetime(2018, 3, 29, 14, 2, 10, 918000), status=' ', service_id=140646174008648, action='Start Trace Session', parameters=['Receive portion of the query:', 'retrieve 1 line of service output per call']) +ServiceInfo(service_id=140138600699200, user='SYSDBA', protocol='TCPv4', address='127.0.0.1', remote_process='/job/fbtrace', remote_pid=4631) +EventServiceQuery(event_id=2, timestamp=datetime.datetime(2018, 4, 3, 12, 41, 1, 797000), 
status=' ', service_id=140138600699200, action=None, parameters=['retrieve the version of the server engine']) +EventServiceQuery(event_id=3, timestamp=datetime.datetime(2018, 4, 3, 12, 41, 30, 784000), status=' ', service_id=140138600699200, action=None, parameters=['retrieve the implementation of the Firebird server']) +EventServiceQuery(event_id=4, timestamp=datetime.datetime(2018, 4, 3, 12, 56, 27, 559000), status=' ', service_id=140138600699200, action='Repair Database', parameters=[]) +""" + if sys.version_info.major == 2 and sys.version_info.minor == 7 and sys.version_info.micro > 13: + output = """ServiceInfo(service_id=140646174008648L, user='SYSDBA', protocol='TCPv4', address='127.0.0.1', remote_process='/job/fbtrace', remote_pid=385) +EventServiceQuery(event_id=1, timestamp=datetime.datetime(2018, 3, 29, 14, 2, 10, 918000), status=' ', service_id=140646174008648L, action='Start Trace Session', parameters=['Receive portion of the query:', 'retrieve 1 line of service output per call']) +ServiceInfo(service_id=140138600699200L, user='SYSDBA', protocol='TCPv4', address='127.0.0.1', remote_process='/job/fbtrace', remote_pid=4631) +EventServiceQuery(event_id=2, timestamp=datetime.datetime(2018, 4, 3, 12, 41, 1, 797000), status=' ', service_id=140138600699200L, action=None, parameters=['retrieve the version of the server engine']) +EventServiceQuery(event_id=3, timestamp=datetime.datetime(2018, 4, 3, 12, 41, 30, 784000), status=' ', service_id=140138600699200L, action=None, parameters=['retrieve the implementation of the Firebird server']) +EventServiceQuery(event_id=4, timestamp=datetime.datetime(2018, 4, 3, 12, 56, 27, 559000), status=' ', service_id=140138600699200L, action='Repair Database', parameters=[]) +""" + self._check_events(trace_lines, output) + def test_set_context(self): + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2014-05-23T11:00:28.6160 (3720:0000000000EFD9E8) START_TRANSACTION + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) + +2017-11-09T11:21:59.0270 (2500:0000000001A45B00) SET_CONTEXT + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) +[USER_TRANSACTION] TRANSACTION_TIMESTAMP = "2017-11-09 11:21:59.0270" + +2017-11-09T11:21:59.0300 (2500:0000000001A45B00) SET_CONTEXT + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + (TRA_1570, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE) +[USER_SESSION] MY_KEY = "1" +""" + output = """EventAttach(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), status=' ', attachment_id=8, database='/home/employee.fdb', charset='ISO88591', protocol='TCPv4', address='192.168.1.5', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventTransactionStart(event_id=2, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 616000), status=' ', attachment_id=8, transaction_id=1570, options=['READ_COMMITTED', 'REC_VERSION', 'WAIT', 'READ_WRITE']) +EventSetContext(event_id=3, timestamp=datetime.datetime(2017, 11, 9, 11, 21, 59, 27000), attachment_id=8, transaction_id=1570, context='USER_TRANSACTION', key='TRANSACTION_TIMESTAMP', value='"2017-11-09 11:21:59.0270"') 
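(The SET_CONTEXT payload lines in the fixture above — [USER_TRANSACTION] TRANSACTION_TIMESTAMP = "..." and [USER_SESSION] MY_KEY = "1" — decompose the same way. A hedged sketch of the reduction to an EventSetContext-shaped tuple, with field names assumed from the reprs rather than taken from upstream API:

    import re
    from collections import namedtuple

    # Illustrative stand-in for the EventSetContext tuples; note that the expected
    # output keeps the surrounding double quotes as part of the value.
    SetContext = namedtuple('SetContext', 'context key value')

    # Matches payload lines such as: [USER_SESSION] MY_KEY = "1"
    SET_CONTEXT_RE = re.compile(r'^\[(?P<context>\w+)\]\s+(?P<key>\w+)\s*=\s*(?P<value>".*")$')

    def parse_set_context(line):
        m = SET_CONTEXT_RE.match(line)
        return SetContext(**m.groupdict()) if m else None

    print(parse_set_context('[USER_SESSION] MY_KEY = "1"'))
    # -> SetContext(context='USER_SESSION', key='MY_KEY', value='"1"')

)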
+EventSetContext(event_id=4, timestamp=datetime.datetime(2017, 11, 9, 11, 21, 59, 30000), attachment_id=8, transaction_id=1570, context='USER_SESSION', key='MY_KEY', value='"1"') +""" + self._check_events(trace_lines, output) + def test_error(self): + trace_lines = """2018-03-22T10:06:59.5090 (4992:0x7f92a22a4978) ERROR AT jrd8_attach_database + /home/test.fdb (ATT_0, sysdba, NONE, TCPv4:127.0.0.1) + /usr/bin/flamerobin:4985 +335544344 : I/O error during "open" operation for file "/home/test.fdb" +335544734 : Error while trying to open file + 2 : No such file or directory + +2018-03-22T11:00:59.5090 (2500:0000000022415DB8) ERROR AT jrd8_fetch + /home/test.fdb (ATT_519417, SYSDBA:NONE, WIN1250, TCPv4:172.19.54.61) + /usr/bin/flamerobin:4985 +335544364 : request synchronization error + +2018-04-03T12:49:28.5080 (5831:0x7f748c054978) ERROR AT jrd8_service_query + service_mgr, (Service 0x7f748f839540, SYSDBA, TCPv4:127.0.0.1, /job/fbtrace:4631) +335544344 : I/O error during "open" operation for file "bug.fdb" +335544734 : Error while trying to open file + 2 : No such file or directory +""" + output = """AttachmentInfo(attachment_id=0, database='/home/test.fdb', charset='NONE', protocol='TCPv4', address='127.0.0.1', user='sysdba', role='NONE', remote_process='/usr/bin/flamerobin', remote_pid=4985) +EventError(event_id=1, timestamp=datetime.datetime(2018, 3, 22, 10, 6, 59, 509000), attachment_id=0, place='jrd8_attach_database', details=['335544344 : I/O error during "open" operation for file "/home/test.fdb"', '335544734 : Error while trying to open file', '2 : No such file or directory']) +AttachmentInfo(attachment_id=519417, database='/home/test.fdb', charset='WIN1250', protocol='TCPv4', address='172.19.54.61', user='SYSDBA', role='NONE', remote_process='/usr/bin/flamerobin', remote_pid=4985) +EventError(event_id=2, timestamp=datetime.datetime(2018, 3, 22, 11, 0, 59, 509000), attachment_id=519417, place='jrd8_fetch', details=['335544364 : request synchronization error']) +ServiceInfo(service_id=140138600699200, user='SYSDBA', protocol='TCPv4', address='127.0.0.1', remote_process='/job/fbtrace', remote_pid=4631) +EventServiceError(event_id=3, timestamp=datetime.datetime(2018, 4, 3, 12, 49, 28, 508000), service_id=140138600699200, place='jrd8_service_query', details=['335544344 : I/O error during "open" operation for file "bug.fdb"', '335544734 : Error while trying to open file', '2 : No such file or directory']) +""" + if sys.version_info.major == 2 and sys.version_info.minor == 7 and sys.version_info.micro > 13: + output = """AttachmentInfo(attachment_id=0, database='/home/test.fdb', charset='NONE', protocol='TCPv4', address='127.0.0.1', user='sysdba', role='NONE', remote_process='/usr/bin/flamerobin', remote_pid=4985) +EventError(event_id=1, timestamp=datetime.datetime(2018, 3, 22, 10, 6, 59, 509000), attachment_id=0, place='jrd8_attach_database', details=['335544344 : I/O error during "open" operation for file "/home/test.fdb"', '335544734 : Error while trying to open file', '2 : No such file or directory']) +AttachmentInfo(attachment_id=519417, database='/home/test.fdb', charset='WIN1250', protocol='TCPv4', address='172.19.54.61', user='SYSDBA', role='NONE', remote_process='/usr/bin/flamerobin', remote_pid=4985) +EventError(event_id=2, timestamp=datetime.datetime(2018, 3, 22, 11, 0, 59, 509000), attachment_id=519417, place='jrd8_fetch', details=['335544364 : request synchronization error']) +ServiceInfo(service_id=140138600699200L, user='SYSDBA', protocol='TCPv4', address='127.0.0.1', 
remote_process='/job/fbtrace', remote_pid=4631) +EventServiceError(event_id=3, timestamp=datetime.datetime(2018, 4, 3, 12, 49, 28, 508000), service_id=140138600699200L, place='jrd8_service_query', details=['335544344 : I/O error during "open" operation for file "bug.fdb"', '335544734 : Error while trying to open file', '2 : No such file or directory']) +""" + self._check_events(trace_lines, output) + def test_warning(self): + trace_lines = """2018-03-22T10:06:59.5090 (4992:0x7f92a22a4978) WARNING AT jrd8_attach_database + /home/test.fdb (ATT_0, sysdba, NONE, TCPv4:127.0.0.1) + /usr/bin/flamerobin:4985 +Some reason for the warning. + +2018-04-03T12:49:28.5080 (5831:0x7f748c054978) WARNING AT jrd8_service_query + service_mgr, (Service 0x7f748f839540, SYSDBA, TCPv4:127.0.0.1, /job/fbtrace:4631) +Some reason for the warning. +""" + output = """AttachmentInfo(attachment_id=0, database='/home/test.fdb', charset='NONE', protocol='TCPv4', address='127.0.0.1', user='sysdba', role='NONE', remote_process='/usr/bin/flamerobin', remote_pid=4985) +EventWarning(event_id=1, timestamp=datetime.datetime(2018, 3, 22, 10, 6, 59, 509000), attachment_id=0, place='jrd8_attach_database', details=['Some reason for the warning.']) +ServiceInfo(service_id=140138600699200, user='SYSDBA', protocol='TCPv4', address='127.0.0.1', remote_process='/job/fbtrace', remote_pid=4631) +EventServiceWarning(event_id=2, timestamp=datetime.datetime(2018, 4, 3, 12, 49, 28, 508000), service_id=140138600699200, place='jrd8_service_query', details=['Some reason for the warning.']) +""" + if sys.version_info.major == 2 and sys.version_info.minor == 7 and sys.version_info.micro > 13: + output = """AttachmentInfo(attachment_id=0, database='/home/test.fdb', charset='NONE', protocol='TCPv4', address='127.0.0.1', user='sysdba', role='NONE', remote_process='/usr/bin/flamerobin', remote_pid=4985) +EventWarning(event_id=1, timestamp=datetime.datetime(2018, 3, 22, 10, 6, 59, 509000), attachment_id=0, place='jrd8_attach_database', details=['Some reason for the warning.']) +ServiceInfo(service_id=140138600699200L, user='SYSDBA', protocol='TCPv4', address='127.0.0.1', remote_process='/job/fbtrace', remote_pid=4631) +EventServiceWarning(event_id=2, timestamp=datetime.datetime(2018, 4, 3, 12, 49, 28, 508000), service_id=140138600699200L, place='jrd8_service_query', details=['Some reason for the warning.']) +""" + self._check_events(trace_lines, output) + def test_sweep_start(self): + trace_lines = """2018-03-22T17:33:56.9690 (12351:0x7f0174bdd978) SWEEP_START + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, TCPv4:127.0.0.1) + +Transaction counters: + Oldest interesting 155 + Oldest active 156 + Oldest snapshot 156 + Next transaction 156 + +2018-03-22T18:33:56.9690 (12351:0x7f0174bdd978) SWEEP_START + /opt/firebird/examples/empbuild/employee.fdb (ATT_9, SYSDBA:NONE, NONE, TCPv4:127.0.0.1) + /opt/firebird/bin/isql:8723 + +Transaction counters: + Oldest interesting 155 + Oldest active 156 + Oldest snapshot 156 + Next transaction 156 +""" + output = """AttachmentInfo(attachment_id=8, database='/opt/firebird/examples/empbuild/employee.fdb', charset='NONE', protocol='TCPv4', address='127.0.0.1', user='SYSDBA', role='NONE', remote_process=None, remote_pid=None) +EventSweepStart(event_id=1, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 56, 969000), attachment_id=8, oit=155, oat=156, ost=156, next=156) +AttachmentInfo(attachment_id=9, database='/opt/firebird/examples/empbuild/employee.fdb', charset='NONE', protocol='TCPv4', 
address='127.0.0.1', user='SYSDBA', role='NONE', remote_process='/opt/firebird/bin/isql', remote_pid=8723) +EventSweepStart(event_id=2, timestamp=datetime.datetime(2018, 3, 22, 18, 33, 56, 969000), attachment_id=9, oit=155, oat=156, ost=156, next=156) +""" + self._check_events(trace_lines, output) + def test_sweep_progress(self): + trace_lines = """2018-03-22T17:33:56.9820 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 5 fetch(es) + +2018-03-22T17:33:56.9830 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 6 read(s), 409 fetch(es) + +2018-03-22T17:33:56.9920 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 9 ms, 5 read(s), 345 fetch(es), 39 mark(s) + +2018-03-22T17:33:56.9930 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 4 read(s), 251 fetch(es), 24 mark(s) + +2018-03-22T17:33:57.0000 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 7 ms, 14 read(s), 877 fetch(es), 4 mark(s) + +2018-03-22T17:33:57.0000 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 115 fetch(es) + +2018-03-22T17:33:57.0000 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 7 fetch(es) + +2018-03-22T17:33:57.0020 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 1 ms, 2 read(s), 25 fetch(es) + +2018-03-22T17:33:57.0070 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 5 ms, 4 read(s), 1 write(s), 339 fetch(es), 97 mark(s) + +2018-03-22T17:33:57.0090 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 2 ms, 6 read(s), 1 write(s), 467 fetch(es) + +2018-03-22T17:33:57.0100 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 149 fetch(es) + +2018-03-22T17:33:57.0930 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 83 ms, 11 read(s), 8 write(s), 2307 fetch(es), 657 mark(s) + +2018-03-22T17:33:57.1010 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 7 ms, 2 read(s), 1 write(s), 7 fetch(es) + +2018-03-22T17:33:57.1010 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 17 fetch(es) + +2018-03-22T17:33:57.1010 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 75 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 10 ms, 5 read(s), 305 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 25 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, 
) + 0 ms, 2 read(s), 7 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 1 read(s), 165 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 31 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 1 read(s), 141 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 5 read(s), 29 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 69 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 107 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 303 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 13 fetch(es) + +2018-03-22T17:33:57.1120 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 5 fetch(es) + +2018-03-22T17:33:57.1130 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 31 fetch(es) + +2018-03-22T17:33:57.1130 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 6 read(s), 285 fetch(es), 60 mark(s) + +2018-03-22T17:33:57.1350 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 8 ms, 2 read(s), 1 write(s), 45 fetch(es) + +2018-03-22T17:33:57.1350 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 3 read(s), 89 fetch(es) + +2018-03-22T17:33:57.1350 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 3 read(s), 61 fetch(es), 12 mark(s) + +2018-03-22T17:33:57.1420 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 7 ms, 2 read(s), 1 write(s), 59 fetch(es) + +2018-03-22T17:33:57.1480 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 5 ms, 3 read(s), 1 write(s), 206 fetch(es), 48 mark(s) + +2018-03-22T17:33:57.1510 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 2 ms, 2 read(s), 1 write(s), 101 fetch(es) + +2018-03-22T17:33:57.1510 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 33 fetch(es) + +2018-03-22T17:33:57.1510 (12351:0x7f0174bdd978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 69 fetch(es) +""" + output = """AttachmentInfo(attachment_id=8, database='/opt/firebird/examples/empbuild/employee.fdb', charset='NONE', protocol='', 
address='', user='SYSDBA', role='NONE', remote_process=None, remote_pid=None) +EventSweepProgress(event_id=1, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 56, 982000), attachment_id=8, run_time=0, reads=None, writes=None, fetches=5, marks=None, access=None) +EventSweepProgress(event_id=2, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 56, 983000), attachment_id=8, run_time=0, reads=6, writes=None, fetches=409, marks=None, access=None) +EventSweepProgress(event_id=3, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 56, 992000), attachment_id=8, run_time=9, reads=5, writes=None, fetches=345, marks=39, access=None) +EventSweepProgress(event_id=4, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 56, 993000), attachment_id=8, run_time=0, reads=4, writes=None, fetches=251, marks=24, access=None) +EventSweepProgress(event_id=5, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57), attachment_id=8, run_time=7, reads=14, writes=None, fetches=877, marks=4, access=None) +EventSweepProgress(event_id=6, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57), attachment_id=8, run_time=0, reads=2, writes=None, fetches=115, marks=None, access=None) +EventSweepProgress(event_id=7, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57), attachment_id=8, run_time=0, reads=2, writes=None, fetches=7, marks=None, access=None) +EventSweepProgress(event_id=8, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 2000), attachment_id=8, run_time=1, reads=2, writes=None, fetches=25, marks=None, access=None) +EventSweepProgress(event_id=9, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 7000), attachment_id=8, run_time=5, reads=4, writes=1, fetches=339, marks=97, access=None) +EventSweepProgress(event_id=10, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 9000), attachment_id=8, run_time=2, reads=6, writes=1, fetches=467, marks=None, access=None) +EventSweepProgress(event_id=11, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 10000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=149, marks=None, access=None) +EventSweepProgress(event_id=12, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 93000), attachment_id=8, run_time=83, reads=11, writes=8, fetches=2307, marks=657, access=None) +EventSweepProgress(event_id=13, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 101000), attachment_id=8, run_time=7, reads=2, writes=1, fetches=7, marks=None, access=None) +EventSweepProgress(event_id=14, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 101000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=17, marks=None, access=None) +EventSweepProgress(event_id=15, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 101000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=75, marks=None, access=None) +EventSweepProgress(event_id=16, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=10, reads=5, writes=None, fetches=305, marks=None, access=None) +EventSweepProgress(event_id=17, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=25, marks=None, access=None) +EventSweepProgress(event_id=18, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=7, marks=None, access=None) +EventSweepProgress(event_id=19, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=0, reads=1, writes=None, fetches=165, marks=None, access=None) +EventSweepProgress(event_id=20, 
timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=31, marks=None, access=None) +EventSweepProgress(event_id=21, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=0, reads=1, writes=None, fetches=141, marks=None, access=None) +EventSweepProgress(event_id=22, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=0, reads=5, writes=None, fetches=29, marks=None, access=None) +EventSweepProgress(event_id=23, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=69, marks=None, access=None) +EventSweepProgress(event_id=24, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=0, reads=None, writes=None, fetches=107, marks=None, access=None) +EventSweepProgress(event_id=25, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=303, marks=None, access=None) +EventSweepProgress(event_id=26, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=13, marks=None, access=None) +EventSweepProgress(event_id=27, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 112000), attachment_id=8, run_time=0, reads=None, writes=None, fetches=5, marks=None, access=None) +EventSweepProgress(event_id=28, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 113000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=31, marks=None, access=None) +EventSweepProgress(event_id=29, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 113000), attachment_id=8, run_time=0, reads=6, writes=None, fetches=285, marks=60, access=None) +EventSweepProgress(event_id=30, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 135000), attachment_id=8, run_time=8, reads=2, writes=1, fetches=45, marks=None, access=None) +EventSweepProgress(event_id=31, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 135000), attachment_id=8, run_time=0, reads=3, writes=None, fetches=89, marks=None, access=None) +EventSweepProgress(event_id=32, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 135000), attachment_id=8, run_time=0, reads=3, writes=None, fetches=61, marks=12, access=None) +EventSweepProgress(event_id=33, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 142000), attachment_id=8, run_time=7, reads=2, writes=1, fetches=59, marks=None, access=None) +EventSweepProgress(event_id=34, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 148000), attachment_id=8, run_time=5, reads=3, writes=1, fetches=206, marks=48, access=None) +EventSweepProgress(event_id=35, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 151000), attachment_id=8, run_time=2, reads=2, writes=1, fetches=101, marks=None, access=None) +EventSweepProgress(event_id=36, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 151000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=33, marks=None, access=None) +EventSweepProgress(event_id=37, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 151000), attachment_id=8, run_time=0, reads=2, writes=None, fetches=69, marks=None, access=None) +""" + self._check_events(trace_lines, output) + def test_sweep_progress_performance(self): + trace_lines = """2018-03-29T15:23:01.3050 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 2 ms, 1 read(s), 11 fetch(es), 2 mark(s) + 
+Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$DATABASE 1 1 + +2018-03-29T15:23:01.3130 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 7 ms, 8 read(s), 436 fetch(es), 9 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$FIELDS 199 3 + +2018-03-29T15:23:01.3150 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 1 ms, 4 read(s), 229 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$INDEX_SEGMENTS 111 + +2018-03-29T15:23:01.3150 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 3 read(s), 179 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$INDICES 87 + +2018-03-29T15:23:01.3370 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 21 ms, 18 read(s), 1 write(s), 927 fetch(es), 21 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$RELATION_FIELDS 420 4 + +2018-03-29T15:23:01.3440 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 7 ms, 2 read(s), 1 write(s), 143 fetch(es), 10 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$RELATIONS 53 2 + +2018-03-29T15:23:01.3610 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 17 ms, 2 read(s), 1 write(s), 7 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$VIEW_RELATIONS 2 + +2018-03-29T15:23:01.3610 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 25 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$FORMATS 11 + +2018-03-29T15:23:01.3860 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 24 ms, 5 read(s), 1 write(s), 94 fetch(es), 4 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$SECURITY_CLASSES 39 1 + +2018-03-29T15:23:01.3940 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 7 ms, 6 read(s), 467 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge 
+*************************************************************************************************************** +RDB$TYPES 228 + +2018-03-29T15:23:01.3960 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 1 ms, 2 read(s), 149 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$TRIGGERS 67 + +2018-03-29T15:23:01.3980 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 1 ms, 8 read(s), 341 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$DEPENDENCIES 163 + +2018-03-29T15:23:01.3980 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 7 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$FUNCTIONS 2 + +2018-03-29T15:23:01.3980 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 17 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$FUNCTION_ARGUMENTS 7 + +2018-03-29T15:23:01.3980 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 75 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$TRIGGER_MESSAGES 36 + +2018-03-29T15:23:01.3990 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 1 ms, 5 read(s), 305 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$USER_PRIVILEGES 148 + +2018-03-29T15:23:01.4230 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 25 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$GENERATORS 11 + +2018-03-29T15:23:01.4230 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 7 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$FIELD_DIMENSIONS 2 + +2018-03-29T15:23:01.4230 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 1 read(s), 165 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$RELATION_CONSTRAINTS 80 + +2018-03-29T15:23:01.4230 
(7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 31 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$REF_CONSTRAINTS 14 + +2018-03-29T15:23:01.4290 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 5 ms, 1 read(s), 141 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$CHECK_CONSTRAINTS 68 + +2018-03-29T15:23:01.4300 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 5 read(s), 29 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$PROCEDURES 10 + +2018-03-29T15:23:01.4300 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 69 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$PROCEDURE_PARAMETERS 33 + +2018-03-29T15:23:01.4300 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 107 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$CHARACTER_SETS 52 + +2018-03-29T15:23:01.4300 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 303 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$COLLATIONS 148 + +2018-03-29T15:23:01.4310 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 13 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$EXCEPTIONS 5 + +2018-03-29T15:23:01.4310 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 5 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +RDB$ROLES 1 + +2018-03-29T15:23:01.4310 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 31 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +COUNTRY 14 + +2018-03-29T15:23:01.4310 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 4 read(s), 69 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge 
+*************************************************************************************************************** +JOB 31 + +2018-03-29T15:23:01.4310 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 45 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +DEPARTMENT 21 + +2018-03-29T15:23:01.4310 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 3 read(s), 89 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +EMPLOYEE 42 + +2018-03-29T15:23:01.4310 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 15 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +PROJECT 6 + +2018-03-29T15:23:01.4310 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 59 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +EMPLOYEE_PROJECT 28 + +2018-03-29T15:23:01.4320 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 51 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +PROJ_DEPT_BUDGET 24 + +2018-03-29T15:23:01.4320 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 101 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +SALARY_HISTORY 49 + +2018-03-29T15:23:01.4320 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 33 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +CUSTOMER 15 + +2018-03-29T15:23:01.4320 (7035:0x7fde644e4978) SWEEP_PROGRESS + /opt/firebird/examples/empbuild/employee.fdb (ATT_24, SYSDBA:NONE, NONE, ) + 0 ms, 2 read(s), 69 fetch(es) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +SALES 33 +""" + output = """AttachmentInfo(attachment_id=24, database='/opt/firebird/examples/empbuild/employee.fdb', charset='NONE', protocol='', address='', user='SYSDBA', role='NONE', remote_process=None, remote_pid=None) +EventSweepProgress(event_id=1, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 305000), attachment_id=24, run_time=2, reads=1, writes=None, fetches=11, marks=2, access=[AccessTuple(table='RDB$DATABASE', natural=1, index=0, update=0, insert=0, delete=0, backout=0, 
purge=1, expunge=0)]) +EventSweepProgress(event_id=2, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 313000), attachment_id=24, run_time=7, reads=8, writes=None, fetches=436, marks=9, access=[AccessTuple(table='RDB$FIELDS', natural=199, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=3)]) +EventSweepProgress(event_id=3, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 315000), attachment_id=24, run_time=1, reads=4, writes=None, fetches=229, marks=None, access=[AccessTuple(table='RDB$INDEX_SEGMENTS', natural=111, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=4, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 315000), attachment_id=24, run_time=0, reads=3, writes=None, fetches=179, marks=None, access=[AccessTuple(table='RDB$INDICES', natural=87, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=5, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 337000), attachment_id=24, run_time=21, reads=18, writes=1, fetches=927, marks=21, access=[AccessTuple(table='RDB$RELATION_FIELDS', natural=420, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=4)]) +EventSweepProgress(event_id=6, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 344000), attachment_id=24, run_time=7, reads=2, writes=1, fetches=143, marks=10, access=[AccessTuple(table='RDB$RELATIONS', natural=53, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=2)]) +EventSweepProgress(event_id=7, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 361000), attachment_id=24, run_time=17, reads=2, writes=1, fetches=7, marks=None, access=[AccessTuple(table='RDB$VIEW_RELATIONS', natural=2, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=8, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 361000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=25, marks=None, access=[AccessTuple(table='RDB$FORMATS', natural=11, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=9, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 386000), attachment_id=24, run_time=24, reads=5, writes=1, fetches=94, marks=4, access=[AccessTuple(table='RDB$SECURITY_CLASSES', natural=39, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=1)]) +EventSweepProgress(event_id=10, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 394000), attachment_id=24, run_time=7, reads=6, writes=None, fetches=467, marks=None, access=[AccessTuple(table='RDB$TYPES', natural=228, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=11, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 396000), attachment_id=24, run_time=1, reads=2, writes=None, fetches=149, marks=None, access=[AccessTuple(table='RDB$TRIGGERS', natural=67, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=12, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 398000), attachment_id=24, run_time=1, reads=8, writes=None, fetches=341, marks=None, access=[AccessTuple(table='RDB$DEPENDENCIES', natural=163, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=13, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 398000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=7, marks=None, access=[AccessTuple(table='RDB$FUNCTIONS', natural=2, index=0, update=0, insert=0, delete=0, 
backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=14, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 398000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=17, marks=None, access=[AccessTuple(table='RDB$FUNCTION_ARGUMENTS', natural=7, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=15, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 398000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=75, marks=None, access=[AccessTuple(table='RDB$TRIGGER_MESSAGES', natural=36, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=16, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 399000), attachment_id=24, run_time=1, reads=5, writes=None, fetches=305, marks=None, access=[AccessTuple(table='RDB$USER_PRIVILEGES', natural=148, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=17, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 423000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=25, marks=None, access=[AccessTuple(table='RDB$GENERATORS', natural=11, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=18, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 423000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=7, marks=None, access=[AccessTuple(table='RDB$FIELD_DIMENSIONS', natural=2, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=19, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 423000), attachment_id=24, run_time=0, reads=1, writes=None, fetches=165, marks=None, access=[AccessTuple(table='RDB$RELATION_CONSTRAINTS', natural=80, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=20, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 423000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=31, marks=None, access=[AccessTuple(table='RDB$REF_CONSTRAINTS', natural=14, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=21, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 429000), attachment_id=24, run_time=5, reads=1, writes=None, fetches=141, marks=None, access=[AccessTuple(table='RDB$CHECK_CONSTRAINTS', natural=68, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=22, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 430000), attachment_id=24, run_time=0, reads=5, writes=None, fetches=29, marks=None, access=[AccessTuple(table='RDB$PROCEDURES', natural=10, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=23, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 430000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=69, marks=None, access=[AccessTuple(table='RDB$PROCEDURE_PARAMETERS', natural=33, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=24, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 430000), attachment_id=24, run_time=0, reads=None, writes=None, fetches=107, marks=None, access=[AccessTuple(table='RDB$CHARACTER_SETS', natural=52, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=25, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 430000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=303, marks=None, 
access=[AccessTuple(table='RDB$COLLATIONS', natural=148, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=26, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 431000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=13, marks=None, access=[AccessTuple(table='RDB$EXCEPTIONS', natural=5, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=27, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 431000), attachment_id=24, run_time=0, reads=None, writes=None, fetches=5, marks=None, access=[AccessTuple(table='RDB$ROLES', natural=1, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=28, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 431000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=31, marks=None, access=[AccessTuple(table='COUNTRY', natural=14, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=29, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 431000), attachment_id=24, run_time=0, reads=4, writes=None, fetches=69, marks=None, access=[AccessTuple(table='JOB', natural=31, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=30, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 431000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=45, marks=None, access=[AccessTuple(table='DEPARTMENT', natural=21, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=31, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 431000), attachment_id=24, run_time=0, reads=3, writes=None, fetches=89, marks=None, access=[AccessTuple(table='EMPLOYEE', natural=42, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=32, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 431000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=15, marks=None, access=[AccessTuple(table='PROJECT', natural=6, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=33, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 431000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=59, marks=None, access=[AccessTuple(table='EMPLOYEE_PROJECT', natural=28, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=34, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 432000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=51, marks=None, access=[AccessTuple(table='PROJ_DEPT_BUDGET', natural=24, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=35, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 432000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=101, marks=None, access=[AccessTuple(table='SALARY_HISTORY', natural=49, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=36, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 432000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=33, marks=None, access=[AccessTuple(table='CUSTOMER', natural=15, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +EventSweepProgress(event_id=37, timestamp=datetime.datetime(2018, 3, 29, 15, 23, 1, 432000), attachment_id=24, run_time=0, reads=2, writes=None, fetches=69, marks=None, 
access=[AccessTuple(table='SALES', natural=33, index=0, update=0, insert=0, delete=0, backout=0, purge=0, expunge=0)]) +""" + self._check_events(trace_lines, output) + def test_sweep_finish(self): + trace_lines = """2018-03-22T17:33:57.2270 (12351:0x7f0174bdd978) SWEEP_FINISH + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) + +Transaction counters: + Oldest interesting 156 + Oldest active 156 + Oldest snapshot 156 + Next transaction 157 + 257 ms, 177 read(s), 30 write(s), 8279 fetch(es), 945 mark(s) + +""" + output = """AttachmentInfo(attachment_id=8, database='/opt/firebird/examples/empbuild/employee.fdb', charset='NONE', protocol='', address='', user='SYSDBA', role='NONE', remote_process=None, remote_pid=None) +EventSweepFinish(event_id=1, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 227000), attachment_id=8, oit=156, oat=156, ost=156, next=157, run_time=257, reads=177, writes=30, fetches=8279, marks=945) +""" + self._check_events(trace_lines, output) + def test_sweep_failed(self): + trace_lines = """2018-03-22T17:33:57.2270 (12351:0x7f0174bdd978) SWEEP_FAILED + /opt/firebird/examples/empbuild/employee.fdb (ATT_8, SYSDBA:NONE, NONE, ) +""" + output = """AttachmentInfo(attachment_id=8, database='/opt/firebird/examples/empbuild/employee.fdb', charset='NONE', protocol='', address='', user='SYSDBA', role='NONE', remote_process=None, remote_pid=None) +EventSweepFailed(event_id=1, timestamp=datetime.datetime(2018, 3, 22, 17, 33, 57, 227000), attachment_id=8) +""" + self._check_events(trace_lines, output) + def test_blr_compile(self): + trace_lines = """2018-04-03T17:00:43.4270 (9772:0x7f2c5004b978) COMPILE_BLR + /home/data/db/employee.fdb (ATT_5, SYSDBA:NONE, NONE, TCPv4:127.0.0.1) + /bin/python:9737 +------------------------------------------------------------------------------- + 0 blr_version5, + 1 blr_begin, + 2 blr_message, 0, 4,0, + 6 blr_varying2, 0,0, 15,0, + 11 blr_varying2, 0,0, 10,0, + 16 blr_short, 0, + 18 blr_short, 0, + 20 blr_loop, + 21 blr_receive, 0, + 23 blr_store, + 24 blr_relation, 7, 'C','O','U','N','T','R','Y', 0, + 34 blr_begin, + 35 blr_assignment, + 36 blr_parameter2, 0, 0,0, 2,0, + 42 blr_field, 0, 7, 'C','O','U','N','T','R','Y', + 52 blr_assignment, + 53 blr_parameter2, 0, 1,0, 3,0, + 59 blr_field, 0, 8, 'C','U','R','R','E','N','C','Y', + 70 blr_end, + 71 blr_end, + 72 blr_eoc + + 0 ms + +2018-04-03T17:00:43.4270 (9772:0x7f2c5004b978) COMPILE_BLR + /home/data/db/employee.fdb (ATT_5, SYSDBA:NONE, NONE, TCPv4:127.0.0.1) + /bin/python:9737 +------------------------------------------------------------------------------- + 0 blr_version5, + 1 blr_begin, + 2 blr_message, 0, 4,0, + 6 blr_varying2, 0,0, 15,0, + 11 blr_varying2, 0,0, 10,0, + 16 blr_short, 0 +...
+ 0 ms + +2018-04-03T17:00:43.4270 (9772:0x7f2c5004b978) COMPILE_BLR + /home/data/db/employee.fdb (ATT_5, SYSDBA:NONE, NONE, TCPv4:127.0.0.1) + /bin/python:9737 + +Statement 22: + 0 ms +""" + output = """AttachmentInfo(attachment_id=5, database='/home/data/db/employee.fdb', charset='NONE', protocol='TCPv4', address='127.0.0.1', user='SYSDBA', role='NONE', remote_process='/bin/python', remote_pid=9737) +EventBLRCompile(event_id=1, timestamp=datetime.datetime(2018, 4, 3, 17, 0, 43, 427000), status=' ', attachment_id=5, statement_id=None, content="0 blr_version5,\\n1 blr_begin,\\n2 blr_message, 0, 4,0,\\n6 blr_varying2, 0,0, 15,0,\\n11 blr_varying2, 0,0, 10,0,\\n16 blr_short, 0,\\n18 blr_short, 0,\\n20 blr_loop,\\n21 blr_receive, 0,\\n23 blr_store,\\n24 blr_relation, 7, 'C','O','U','N','T','R','Y', 0,\\n34 blr_begin,\\n35 blr_assignment,\\n36 blr_parameter2, 0, 0,0, 2,0,\\n42 blr_field, 0, 7, 'C','O','U','N','T','R','Y',\\n52 blr_assignment,\\n53 blr_parameter2, 0, 1,0, 3,0,\\n59 blr_field, 0, 8, 'C','U','R','R','E','N','C','Y',\\n70 blr_end,\\n71 blr_end,\\n72 blr_eoc", prepare_time=0) +EventBLRCompile(event_id=2, timestamp=datetime.datetime(2018, 4, 3, 17, 0, 43, 427000), status=' ', attachment_id=5, statement_id=None, content='0 blr_version5,\\n1 blr_begin,\\n2 blr_message, 0, 4,0,\\n6 blr_varying2, 0,0, 15,0,\\n11 blr_varying2, 0,0, 10,0,\\n16 blr_short, 0\\n...', prepare_time=0) +EventBLRCompile(event_id=3, timestamp=datetime.datetime(2018, 4, 3, 17, 0, 43, 427000), status=' ', attachment_id=5, statement_id=22, content=None, prepare_time=0) +""" + self._check_events(trace_lines, output) + def test_blr_execute(self): + trace_lines = """2018-04-03T17:00:43.4280 (9772:0x7f2c5004b978) EXECUTE_BLR + /home/data/db/employee.fdb (ATT_5, SYSDBA:NONE, NONE, TCPv4:127.0.0.1) + /home/job/python/envs/pyfirebird/bin/python:9737 + (TRA_9, CONCURRENCY | NOWAIT | READ_WRITE) +------------------------------------------------------------------------------- + 0 blr_version5, + 1 blr_begin, + 2 blr_message, 0, 4,0, + 6 blr_varying2, 0,0, 15,0, + 11 blr_varying2, 0,0, 10,0, + 16 blr_short, 0, + 18 blr_short, 0, + 20 blr_loop, + 21 blr_receive, 0, + 23 blr_store, + 24 blr_relation, 7, 'C','O','U','N','T','R','Y', 0, + 34 blr_begin, + 35 blr_assignment, + 36 blr_parameter2, 0, 0,0, 2,0, + 42 blr_field, 0, 7, 'C','O','U','N','T','R','Y', + 52 blr_assignment, + 53 blr_parameter2, 0, 1,0, 3,0, + 59 blr_field, 0, 8, 'C','U','R','R','E','N','C','Y', + 70 blr_end, + 71 blr_end, + 72 blr_eoc + + 0 ms, 3 read(s), 7 fetch(es), 5 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +COUNTRY 1 + +2018-04-03T17:00:43.4280 (9772:0x7f2c5004b978) EXECUTE_BLR + /home/data/db/employee.fdb (ATT_5, SYSDBA:NONE, NONE, TCPv4:127.0.0.1) + /home/job/python/envs/pyfirebird/bin/python:9737 + (TRA_9, CONCURRENCY | NOWAIT | READ_WRITE) +------------------------------------------------------------------------------- + 0 blr_version5, + 1 blr_begin, + 2 blr_message, 0, 4,0, + 6 blr_varying2, 0,0, 15,0, + 11 blr_varying2, 0,0, 10,0, + 16 blr_short, 0, + 18 blr_short, 0... 
+ 0 ms, 3 read(s), 7 fetch(es), 5 mark(s) + +Table Natural Index Update Insert Delete Backout Purge Expunge +*************************************************************************************************************** +COUNTRY 1 + +2018-04-03T17:00:43.4280 (9772:0x7f2c5004b978) EXECUTE_BLR + /home/data/db/employee.fdb (ATT_5, SYSDBA:NONE, NONE, TCPv4:127.0.0.1) + /home/job/python/envs/pyfirebird/bin/python:9737 + (TRA_9, CONCURRENCY | NOWAIT | READ_WRITE) +Statement 22: + 0 ms, 3 read(s), 7 fetch(es), 5 mark(s) +""" + output = """AttachmentInfo(attachment_id=5, database='/home/data/db/employee.fdb', charset='NONE', protocol='TCPv4', address='127.0.0.1', user='SYSDBA', role='NONE', remote_process='/home/job/python/envs/pyfirebird/bin/python', remote_pid=9737) +TransactionInfo(attachment_id=5, transaction_id=9, options=['CONCURRENCY', 'NOWAIT', 'READ_WRITE']) +EventBLRExecute(event_id=1, timestamp=datetime.datetime(2018, 4, 3, 17, 0, 43, 428000), status=' ', attachment_id=5, transaction_id=9, statement_id=None, content="0 blr_version5,\\n1 blr_begin,\\n2 blr_message, 0, 4,0,\\n6 blr_varying2, 0,0, 15,0,\\n11 blr_varying2, 0,0, 10,0,\\n16 blr_short, 0,\\n18 blr_short, 0,\\n20 blr_loop,\\n21 blr_receive, 0,\\n23 blr_store,\\n24 blr_relation, 7, 'C','O','U','N','T','R','Y', 0,\\n34 blr_begin,\\n35 blr_assignment,\\n36 blr_parameter2, 0, 0,0, 2,0,\\n42 blr_field, 0, 7, 'C','O','U','N','T','R','Y',\\n52 blr_assignment,\\n53 blr_parameter2, 0, 1,0, 3,0,\\n59 blr_field, 0, 8, 'C','U','R','R','E','N','C','Y',\\n70 blr_end,\\n71 blr_end,\\n72 blr_eoc", run_time=0, reads=3, writes=None, fetches=7, marks=5, access=[AccessTuple(table='COUNTRY', natural=0, index=0, update=0, insert=1, delete=0, backout=0, purge=0, expunge=0)]) +EventBLRExecute(event_id=2, timestamp=datetime.datetime(2018, 4, 3, 17, 0, 43, 428000), status=' ', attachment_id=5, transaction_id=9, statement_id=None, content='0 blr_version5,\\n1 blr_begin,\\n2 blr_message, 0, 4,0,\\n6 blr_varying2, 0,0, 15,0,\\n11 blr_varying2, 0,0, 10,0,\\n16 blr_short, 0,\\n18 blr_short, 0...', run_time=0, reads=3, writes=None, fetches=7, marks=5, access=[AccessTuple(table='COUNTRY', natural=0, index=0, update=0, insert=1, delete=0, backout=0, purge=0, expunge=0)]) +EventBLRExecute(event_id=3, timestamp=datetime.datetime(2018, 4, 3, 17, 0, 43, 428000), status=' ', attachment_id=5, transaction_id=9, statement_id=22, content=None, run_time=0, reads=3, writes=None, fetches=7, marks=5, access=None) +""" + self._check_events(trace_lines, output) + def test_dyn_execute(self): + trace_lines = """2018-04-03T17:42:53.5590 (10474:0x7f0d8b4f0978) EXECUTE_DYN + /opt/firebird/examples/empbuild/employee.fdb (ATT_40, SYSDBA:NONE, NONE, ) + (TRA_221, CONCURRENCY | WAIT | READ_WRITE) +------------------------------------------------------------------------------- + 0 gds__dyn_version_1, + 1 gds__dyn_delete_rel, 1,0, 'T', + 5 gds__dyn_end, + 0 gds__dyn_eoc + 20 ms +2018-04-03T17:43:21.3650 (10474:0x7f0d8b4f0978) EXECUTE_DYN + /opt/firebird/examples/empbuild/employee.fdb (ATT_40, SYSDBA:NONE, NONE, ) + (TRA_222, CONCURRENCY | WAIT | READ_WRITE) +------------------------------------------------------------------------------- + 0 gds__dyn_version_1, + 1 gds__dyn_begin, + 2 gds__dyn_def_local_fld, 31,0, 'C','O','U','N','T','R','Y',32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, + 36 gds__dyn_fld_source, 31,0, 'C','O','U','N','T','R','Y','N','A','M','E',32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, + 70 gds__dyn_rel_name, 1,0, 'T', + 74 
gds__dyn_fld_position, 2,0, 0,0, + 79 gds__dyn_update_flag, 2,0, 1,0, + 84 gds__dyn_system_flag, 2,0, 0,0, + 89 gds__dyn_end, + 90 gds__dyn_def_sql_fld, 31,0, 'C','U','R','R','E','N','C','Y',32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, + 124 gds__dyn_fld_type, 2,0, 37,0, + 129 gds__dyn_fld_length, 2,0, 10,0, + 134 gds__dyn_fld_scale, 2,0, 0,0, + 139 gds__dyn_rel_name, 1,0, 'T', + 143 gds__dyn_fld_position, 2,0, 1,0, + 148 gds__dyn_update_flag, 2,0, 1,0, + 153 gds__dyn_system_flag, 2,0, 0,0, + 158 gds__dyn_end, + 159 gds__dyn_end, + 0 gds__dyn_eoc + 0 ms +2018-03-29T13:28:45.8910 (5265:0x7f71ed580978) EXECUTE_DYN + /opt/firebird/examples/empbuild/employee.fdb (ATT_20, SYSDBA:NONE, NONE, ) + (TRA_189, CONCURRENCY | WAIT | READ_WRITE) + 26 ms +""" + output = """AttachmentInfo(attachment_id=40, database='/opt/firebird/examples/empbuild/employee.fdb', charset='NONE', protocol='', address='', user='SYSDBA', role='NONE', remote_process=None, remote_pid=None) +TransactionInfo(attachment_id=40, transaction_id=221, options=['CONCURRENCY', 'WAIT', 'READ_WRITE']) +EventDYNExecute(event_id=1, timestamp=datetime.datetime(2018, 4, 3, 17, 42, 53, 559000), status=' ', attachment_id=40, transaction_id=221, content="0 gds__dyn_version_1,\\n1 gds__dyn_delete_rel, 1,0, 'T',\\n5 gds__dyn_end,\\n0 gds__dyn_eoc", run_time=20) +TransactionInfo(attachment_id=40, transaction_id=222, options=['CONCURRENCY', 'WAIT', 'READ_WRITE']) +EventDYNExecute(event_id=2, timestamp=datetime.datetime(2018, 4, 3, 17, 43, 21, 365000), status=' ', attachment_id=40, transaction_id=222, content="0 gds__dyn_version_1,\\n1 gds__dyn_begin,\\n2 gds__dyn_def_local_fld, 31,0, 'C','O','U','N','T','R','Y',32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,\\n36 gds__dyn_fld_source, 31,0, 'C','O','U','N','T','R','Y','N','A','M','E',32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,\\n70 gds__dyn_rel_name, 1,0, 'T',\\n74 gds__dyn_fld_position, 2,0, 0,0,\\n79 gds__dyn_update_flag, 2,0, 1,0,\\n84 gds__dyn_system_flag, 2,0, 0,0,\\n89 gds__dyn_end,\\n90 gds__dyn_def_sql_fld, 31,0, 'C','U','R','R','E','N','C','Y',32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,\\n124 gds__dyn_fld_type, 2,0, 37,0,\\n129 gds__dyn_fld_length, 2,0, 10,0,\\n134 gds__dyn_fld_scale, 2,0, 0,0,\\n139 gds__dyn_rel_name, 1,0, 'T',\\n143 gds__dyn_fld_position, 2,0, 1,0,\\n148 gds__dyn_update_flag, 2,0, 1,0,\\n153 gds__dyn_system_flag, 2,0, 0,0,\\n158 gds__dyn_end,\\n159 gds__dyn_end,\\n0 gds__dyn_eoc", run_time=0) +AttachmentInfo(attachment_id=20, database='/opt/firebird/examples/empbuild/employee.fdb', charset='NONE', protocol='', address='', user='SYSDBA', role='NONE', remote_process=None, remote_pid=None) +TransactionInfo(attachment_id=20, transaction_id=189, options=['CONCURRENCY', 'WAIT', 'READ_WRITE']) +EventDYNExecute(event_id=3, timestamp=datetime.datetime(2018, 3, 29, 13, 28, 45, 891000), status=' ', attachment_id=20, transaction_id=189, content=None, run_time=26) +""" + self._check_events(trace_lines, output) + def test_unknown(self): + # It could be an event unknown to the trace plugin (case 1), or a completely new event unknown to the trace parser (case 2) + trace_lines = """2014-05-23T11:00:28.5840 (3720:0000000000EFD9E8) Unknown event in ATTACH_DATABASE + /home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5) + /opt/firebird/bin/isql:8723 + +2018-03-22T10:06:59.5090 (4992:0x7f92a22a4978) EVENT_FROM_THE_FUTURE +This event may contain +various information +which could span +multiple 
lines. + +Yes, it could be very long! +""" + output = """EventUnknown(event_id=1, timestamp=datetime.datetime(2014, 5, 23, 11, 0, 28, 584000), data='Unknown event in ATTACH_DATABASE\\n/home/employee.fdb (ATT_8, SYSDBA:NONE, ISO88591, TCPv4:192.168.1.5)\\n/opt/firebird/bin/isql:8723') +EventUnknown(event_id=2, timestamp=datetime.datetime(2018, 3, 22, 10, 6, 59, 509000), data='EVENT_FROM_THE_FUTURE\\nThis event may contain\\nvarious information\\nwhich could span\\nmultiple lines.\\nYes, it could be very long!') +""" + self._check_events(trace_lines, output) + +class TestUtils(FDBTestBase): + def setUp(self): + super(TestUtils, self).setUp() + self.maxDiff = None + def test_objectlist(self): + Item = collections.namedtuple('Item', 'name,size,data') + Point = collections.namedtuple('Point', 'x,y') + data = [Item('A', 100, 'X' * 20), + Item('Aaa', 95, 'X' * 50), + Item('Abb', 90, 'Y' * 20), + Item('B', 85, 'Y' * 50), + Item('Baa', 80, 'Y' * 60), + Item('Bab', 75, 'Z' * 20), + Item('Bba', 65, 'Z' * 50), + Item('Bbb', 70, 'Z' * 50), + Item('C', 0, 'None'),] + # + olist = utils.ObjectList(data) + # basic list operations + self.assertEquals(len(data), len(olist)) + self.assertListEqual(data, olist) + self.assertListEqual(data, olist) + self.assertEqual(olist[0], data[0]) + self.assertEqual(olist.index(Item('B', 85, 'Y' * 50)), 3) + del olist[3] + self.assertEqual(len(olist), len(data) - 1) + olist.insert(3, Item('B', 85, 'Y' * 50)) + self.assertEquals(len(data), len(olist)) + self.assertListEqual(data, olist) + # sort - attrs + olist.sort(['name'], reverse=True) + self.assertListEqual(olist, [Item(name='C', size=0, data='None'), + Item(name='Bbb', size=70, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bba', size=65, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bab', size=75, data='ZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Baa', size=80, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='B', size=85, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Abb', size=90, data='YYYYYYYYYYYYYYYYYYYY'), + Item(name='Aaa', size=95, data='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'), + Item(name='A', size=100, data='XXXXXXXXXXXXXXXXXXXX')]) + olist.sort(['data']) + self.assertListEqual(olist, [Item(name='C', size=0, data='None'), + Item(name='A', size=100, data='XXXXXXXXXXXXXXXXXXXX'), + Item(name='Aaa', size=95, data='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'), + Item(name='Abb', size=90, data='YYYYYYYYYYYYYYYYYYYY'), + Item(name='B', size=85, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Baa', size=80, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Bab', size=75, data='ZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bbb', size=70, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bba', size=65, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ')]) + olist.sort(['data', 'size']) + self.assertListEqual(olist, [Item(name='C', size=0, data='None'), + Item(name='A', size=100, data='XXXXXXXXXXXXXXXXXXXX'), + Item(name='Aaa', size=95, data='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'), + Item(name='Abb', size=90, data='YYYYYYYYYYYYYYYYYYYY'), + Item(name='B', size=85, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Baa', size=80, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Bab', size=75, data='ZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bba', 
size=65, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bbb', size=70, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ')]) + olist.sort(['name']) + self.assertListEqual(data, olist) + # sort - expr + olist = utils.ObjectList(data) + olist.sort(expr='item.size * len(item.name)', reverse=True) + self.assertListEqual(olist, [Item(name='Aaa', size=95, data='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'), + Item(name='Abb', size=90, data='YYYYYYYYYYYYYYYYYYYY'), + Item(name='Baa', size=80, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Bab', size=75, data='ZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bbb', size=70, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bba', size=65, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='A', size=100, data='XXXXXXXXXXXXXXXXXXXX'), + Item(name='B', size=85, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='C', size=0, data='None')]) + olist.sort(expr=lambda x: x.size * len(x.name), reverse=True) + self.assertListEqual(olist, [Item(name='Aaa', size=95, data='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'), + Item(name='Abb', size=90, data='YYYYYYYYYYYYYYYYYYYY'), + Item(name='Baa', size=80, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Bab', size=75, data='ZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bbb', size=70, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bba', size=65, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='A', size=100, data='XXXXXXXXXXXXXXXXXXXX'), + Item(name='B', size=85, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='C', size=0, data='None')]) + # filter/ifilter + olist = utils.ObjectList(data) + fc = olist.filter('item.name.startswith("B")') + self.assertIsInstance(fc, utils.ObjectList) + self.assertListEqual(fc, [Item(name='B', size=85, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Baa', size=80, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Bab', size=75, data='ZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bba', size=65, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bbb', size=70, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ')]) + fc = olist.filter(lambda x: x.name.startswith("B")) + self.assertListEqual(fc, [Item(name='B', size=85, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Baa', size=80, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Bab', size=75, data='ZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bba', size=65, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bbb', size=70, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ')]) + self.assertListEqual(list(olist.ifilter('item.name.startswith("B")')), + [Item(name='B', size=85, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Baa', size=80, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Bab', size=75, data='ZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bba', size=65, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bbb', size=70, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ')]) + self.assertListEqual(list(olist.ifilter(lambda x: x.name.startswith("B"))), + [Item(name='B', size=85, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Baa', size=80, 
data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Bab', size=75, data='ZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bba', size=65, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bbb', size=70, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ')]) + # report/ireport + self.assertListEqual(olist.report('item.name', 'item.size'), + [('A', 100), ('Aaa', 95), ('Abb', 90), ('B', 85), + ('Baa', 80), ('Bab', 75), ('Bba', 65), ('Bbb', 70), ('C', 0)]) + self.assertListEqual(olist.report(lambda x: (x.name, x.size)), + [('A', 100), ('Aaa', 95), ('Abb', 90), ('B', 85), + ('Baa', 80), ('Bab', 75), ('Bba', 65), ('Bbb', 70), ('C', 0)]) + self.assertListEqual(list(olist.ireport('item.name', 'item.size')), + [('A', 100), ('Aaa', 95), ('Abb', 90), ('B', 85), + ('Baa', 80), ('Bab', 75), ('Bba', 65), ('Bbb', 70), ('C', 0)]) + self.assertListEqual(olist.report('"name: %s, size: %d" % (item.name, item.size)'), + ['name: A, size: 100', 'name: Aaa, size: 95', 'name: Abb, size: 90', + 'name: B, size: 85', 'name: Baa, size: 80', 'name: Bab, size: 75', + 'name: Bba, size: 65', 'name: Bbb, size: 70', 'name: C, size: 0']) + self.assertListEqual(olist.report(lambda x: "name: %s, size: %d" % (x.name, x.size)), + ['name: A, size: 100', 'name: Aaa, size: 95', 'name: Abb, size: 90', + 'name: B, size: 85', 'name: Baa, size: 80', 'name: Bab, size: 75', + 'name: Bba, size: 65', 'name: Bbb, size: 70', 'name: C, size: 0']) + # ecount + self.assertEqual(olist.ecount('item.name.startswith("B")'), 5) + self.assertEqual(olist.ecount(lambda x: x.name.startswith("B")), 5) + # split + truelist, falselist = olist.split('item.name.startswith("B")') + self.assertIsInstance(truelist, utils.ObjectList) + self.assertIsInstance(falselist, utils.ObjectList) + self.assertListEqual(list(truelist), [Item(name='B', size=85, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Baa', size=80, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Bab', size=75, data='ZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bba', size=65, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bbb', size=70, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ')]) + self.assertListEqual(list(falselist), [Item('A', 100, 'X' * 20), Item('Aaa', 95, 'X' * 50), Item('Abb', 90, 'Y' * 20), + Item(name='C', size=0, data='None')]) + # extract + truelist = olist.extract('item.name.startswith("A")') + self.assertIsInstance(truelist, utils.ObjectList) + self.assertListEqual(list(truelist), [Item('A', 100, 'X' * 20), Item('Aaa', 95, 'X' * 50), Item('Abb', 90, 'Y' * 20)]) + self.assertListEqual(olist, [Item(name='B', size=85, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Baa', size=80, data='YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'), + Item(name='Bab', size=75, data='ZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bba', size=65, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='Bbb', size=70, data='ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'), + Item(name='C', size=0, data='None')]) + # clear + olist.clear() + self.assertEqual(len(olist), 0, 'list is not empty') + # get + olist.extend(data) + with self.assertRaises(TypeError) as cm: + item = olist.get('Baa') + exc = cm.exception + self.assertEqual(exc.args[0], "Key expression required") + self.assertIs(olist.get('Baa', 'item.name'), olist[4]) + olist = utils.ObjectList(data, Item, 'item.name') + self.assertIs(olist.get('Baa'), olist[4]) + 
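# Note: get() also accepts a key expression or a callable, as the next assertions show. +
# A minimal sketch, assuming only the utils.ObjectList API exercised in this test: +
#   ol = utils.ObjectList(data, Item, 'item.name') +
#   ol.freeze()                     # builds an internal key->item dict; the list becomes read-only +
#   assert ol.get('Baa') is ol[4]   # a frozen keyed list answers get() in O(1) via that dict +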
self.assertIs(olist.get(80, 'item.size'), olist[4]) + self.assertIs(olist.get(80, lambda x, value: x.size == value), olist[4]) + olist.freeze() # Frozen list uses O(1) access via dict! + self.assertIs(olist.get('Baa'), olist[4]) + # contains + self.assertTrue(olist.contains('Baa')) + self.assertFalse(olist.contains('FX')) + self.assertTrue(olist.contains('Baa', 'item.name')) + self.assertTrue(olist.contains(80, 'item.size')) + self.assertTrue(olist.contains(80, lambda x, value: x.size == value)) + # immutability + olist = utils.ObjectList(data) + self.assertFalse(olist.frozen, "list is frozen") + self.assertListEqual(olist, data) + olist.freeze() + self.assertTrue(olist.frozen, "list is not frozen") + with self.assertRaises(TypeError) as cm: + olist[0] = Point(1, 1) + exc = cm.exception + self.assertEqual(exc.args[0], "list is frozen") + with self.assertRaises(TypeError) as cm: + olist[0:2] = [Point(1, 1)] + exc = cm.exception + self.assertEqual(exc.args[0], "list is frozen") + with self.assertRaises(TypeError) as cm: + olist.append(Point(1, 1)) + exc = cm.exception + self.assertEqual(exc.args[0], "list is frozen") + with self.assertRaises(TypeError) as cm: + olist.insert(0, [Point(1, 1)]) + exc = cm.exception + self.assertEqual(exc.args[0], "list is frozen") + with self.assertRaises(TypeError) as cm: + olist.extend([Point(1, 1)]) + exc = cm.exception + self.assertEqual(exc.args[0], "list is frozen") + with self.assertRaises(TypeError) as cm: + olist.clear() + exc = cm.exception + self.assertEqual(exc.args[0], "list is frozen") + with self.assertRaises(TypeError) as cm: + olist.extract('True') + exc = cm.exception + self.assertEqual(exc.args[0], "list is frozen") + with self.assertRaises(TypeError) as cm: + del olist[0] + exc = cm.exception + self.assertEqual(exc.args[0], "list is frozen") + with self.assertRaises(TypeError) as cm: + del olist[0:2] + exc = cm.exception + self.assertEqual(exc.args[0], "list is frozen") + # Limit to class(es) + olist = utils.ObjectList(data, Item) + olist = utils.ObjectList(_cls=(Item, Point)) + olist.append(Point(1, 1)) + olist.insert(0, Item('A', 10, 'XXX')) + olist[1] = Point(2, 2) + self.assertListEqual(olist, [Item('A', 10, 'XXX'), Point(2, 2)]) + with self.assertRaises(TypeError) as cm: + olist.append(list()) + exc = cm.exception + self.assertEqual(exc.args[0], "Value is not an instance of allowed class") + # Key + olist = utils.ObjectList(data, Item, 'item.name') + self.assertEqual(olist.get('A'), Item('A', 100, 'X' * 20)) + # any/all + self.assertFalse(olist.all('item.size > 0')) + self.assertTrue(olist.all('item.size < 200')) + self.assertTrue(olist.any('item.size > 0')) + self.assertFalse(olist.any('item.size > 200')) + +class TestGstatParse(FDBTestBase): + def setUp(self): + super(TestGstatParse, self).setUp() + def _parse_file(self, filename): + with open(filename) as f: + return gstat.parse(f) + def test_locale(self): + locale = getlocale(LC_ALL) + if locale[0] is None: + setlocale(LC_ALL, '') + locale = getlocale(LC_ALL) + try: + db = self._parse_file(os.path.join(self.dbpath, 'gstat25-h.out')) + self.assertEquals(locale, getlocale(LC_ALL), "Locale must not change") + if sys.platform == 'win32': + setlocale(LC_ALL, 'Czech_Czech Republic') + else: + setlocale(LC_ALL, 'cs_CZ') + nlocale = getlocale(LC_ALL) + db = self._parse_file(os.path.join(self.dbpath, 'gstat25-h.out')) + self.assertEquals(nlocale, getlocale(LC_ALL), "Locale must not change") + finally: + pass + #setlocale(LC_ALL, locale) + def test_parse25_h(self): + db = 
self._parse_file(os.path.join(self.dbpath, 'gstat25-h.out')) + data = {'attributes': (0,), 'backup_diff_file': None, 'backup_guid': None, 'bumped_transaction': 1, + 'checksum': 12345, 'completed': None, 'continuation_file': None, 'continuation_files': 0, + 'creation_date': datetime.datetime(2013, 5, 27, 23, 40, 53), 'database_dialect': 3, + 'encrypted_blob_pages': None, 'encrypted_data_pages': None, 'encrypted_index_pages': None, + 'executed': datetime.datetime(2018, 4, 4, 15, 29, 10), 'filename': '/home/fdb/test/fbtest25.fdb', + 'flags': 0, 'generation': 2844, 'gstat_version': 2, 'implementation': None, 'implementation_id': 24, + 'indices': 0, 'last_logical_page': None, 'next_attachment_id': 1067, 'next_header_page': 0, 'next_transaction': 1807, + 'oat': 1807, 'ods_version': '11.2', 'oit': 204, 'ost': 1807, 'page_buffers': 0, 'page_size': 4096, + 'replay_logging_file': None, 'root_filename': None, 'sequence_number': 0, 'shadow_count': 0, 'sweep_interval': 20000, + 'system_change_number': None, 'tables': 0} + self.assertIsInstance(db, gstat.StatDatabase) + self.assertDictEqual(data, get_object_data(db), 'Unexpected output from parser (database hdr)') + # + self.assertFalse(db.has_table_stats()) + self.assertFalse(db.has_index_stats()) + self.assertFalse(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + self.assertFalse(db.has_system()) + def test_parse25_a(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat25-a.out')) + # Database + data = {'attributes': (0,), 'backup_diff_file': None, 'backup_guid': None, 'bumped_transaction': 1, + 'checksum': 12345, 'completed': None, 'continuation_file': None, 'continuation_files': 0, + 'creation_date': datetime.datetime(2013, 5, 27, 23, 40, 53), 'database_dialect': 3, + 'encrypted_blob_pages': None, 'encrypted_data_pages': None, 'encrypted_index_pages': None, + 'executed': datetime.datetime(2018, 4, 4, 15, 30, 10), 'filename': '/home/fdb/test/fbtest25.fdb', + 'flags': 0, 'generation': 2844, 'gstat_version': 2, 'implementation': None, 'implementation_id': 24, + 'indices': 39, 'last_logical_page': None, 'next_attachment_id': 1067, 'next_header_page': 0, 'next_transaction': 1807, + 'oat': 1807, 'ods_version': '11.2', 'oit': 204, 'ost': 1807, 'page_buffers': 0, 'page_size': 4096, + 'replay_logging_file': None, 'root_filename': None, 'sequence_number': 0, 'shadow_count': 0, 'sweep_interval': 20000, + 'system_change_number': None, 'tables': 15} + self.assertDictEqual(data, get_object_data(db), 'Unexpected output from parser (database hdr)') + # + self.assertTrue(db.has_table_stats()) + self.assertTrue(db.has_index_stats()) + self.assertFalse(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + self.assertFalse(db.has_system()) + # Tables + data = [{'avg_fill': 86, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=1), 'index_root_page': 210, 'indices': 0, + 'max_versions': None, 'name': 'AR', 'primary_pointer_page': 209, 'table_id': 142, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 15, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 181, 'indices': 1, + 'max_versions': None, 'name': 'COUNTRY', 'primary_pointer_page': 180, 'table_id': 128, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 53, 'avg_record_length': None, 
'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 189, 'indices': 4, + 'max_versions': None, 'name': 'CUSTOMER', 'primary_pointer_page': 188, 'table_id': 132, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 47, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 185, 'indices': 5, + 'max_versions': None, 'name': 'DEPARTMENT', 'primary_pointer_page': 184, 'table_id': 130, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 44, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 2, 'data_pages': 2, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 187, 'indices': 4, + 'max_versions': None, 'name': 'EMPLOYEE', 'primary_pointer_page': 186, 'table_id': 131, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 20, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 196, 'indices': 3, + 'max_versions': None, 'name': 'EMPLOYEE_PROJECT', 'primary_pointer_page': 195, 'table_id': 135, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 73, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 3, 'data_pages': 3, + 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=2), 'index_root_page': 183, 'indices': 4, + 'max_versions': None, 'name': 'JOB', 'primary_pointer_page': 182, 'table_id': 129, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 29, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_root_page': 194, 'indices': 4, + 'max_versions': None, 'name': 'PROJECT', 'primary_pointer_page': 193, 'table_id': 134, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 80, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=1), 'index_root_page': 198, 'indices': 3, + 'max_versions': None, 'name': 'PROJ_DEPT_BUDGET', 'primary_pointer_page': 197, 'table_id': 136, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 58, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 200, 'indices': 4, + 'max_versions': None, 'name': 'SALARY_HISTORY', 'primary_pointer_page': 199, 'table_id': 137, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 68, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 202, 'indices': 6, + 'max_versions': None, 'name': 'SALES', 'primary_pointer_page': 201, 'table_id': 138, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 0, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 282, 'indices': 1, + 'max_versions': None, 'name': 'T', 'primary_pointer_page': 205, 'table_id': 235, 
'total_records': None, + 'total_versions': None}, + {'avg_fill': 20, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 208, 'indices': 0, + 'max_versions': None, 'name': 'T2', 'primary_pointer_page': 207, 'table_id': 141, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 0, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 204, 'indices': 0, + 'max_versions': None, 'name': 'T3', 'primary_pointer_page': 203, 'table_id': 139, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 4, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 192, 'indices': 0, + 'max_versions': None, 'name': 'T4', 'primary_pointer_page': 191, 'table_id': 133, 'total_records': None, + 'total_versions': None}] + i = 0 + while i < len(db.tables): + self.assertDictEqual(data[i], get_object_data(db.tables[i]), 'Unexpected output from parser (tables)') + i += 1 + # Indices + data = [{'avg_data_length': 6.5, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY1', 'nodes': 14, 'total_dup': 0}, + {'avg_data_length': 15.87, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'CUSTNAMEX', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 17.27, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'CUSTREGION', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 4.87, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN23', 'nodes': 15, 'total_dup': 4}, + {'avg_data_length': 1.13, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY22', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 5.38, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 3, 'name': 'BUDGETX', 'nodes': 21, 'total_dup': 7}, + {'avg_data_length': 13.95, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$4', 'nodes': 21, 'total_dup': 0}, + {'avg_data_length': 1.14, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, + 'leaf_buckets': 1, 'max_dup': 3, 'name': 'RDB$FOREIGN10', 'nodes': 21, 'total_dup': 3}, + {'avg_data_length': 0.81, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN6', 'nodes': 21, 'total_dup': 13}, + {'avg_data_length': 1.71, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY5', 'nodes': 21, 'total_dup': 0}, + {'avg_data_length': 15.52, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_id': 1, + 
'leaf_buckets': 1, 'max_dup': 0, 'name': 'NAMEX', 'nodes': 42, 'total_dup': 0}, + {'avg_data_length': 0.81, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN8', 'nodes': 42, 'total_dup': 23}, + {'avg_data_length': 6.79, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN9', 'nodes': 42, 'total_dup': 15}, + {'avg_data_length': 1.31, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY7', 'nodes': 42, 'total_dup': 0}, + {'avg_data_length': 1.04, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 2, 'name': 'RDB$FOREIGN15', 'nodes': 28, 'total_dup': 6}, + {'avg_data_length': 0.86, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 9, 'name': 'RDB$FOREIGN16', 'nodes': 28, 'total_dup': 23}, + {'avg_data_length': 9.11, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY14', 'nodes': 28, 'total_dup': 0}, + {'avg_data_length': 10.9, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 1, 'name': 'MAXSALX', 'nodes': 31, 'total_dup': 5}, + {'avg_data_length': 10.29, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 2, 'name': 'MINSALX', 'nodes': 31, 'total_dup': 7}, + {'avg_data_length': 1.39, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 20, 'name': 'RDB$FOREIGN3', 'nodes': 31, 'total_dup': 24}, + {'avg_data_length': 10.45, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY2', 'nodes': 31, 'total_dup': 0}, + {'avg_data_length': 22.5, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'PRODTYPEX', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 13.33, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$11', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 1.33, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$FOREIGN13', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 4.83, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY12', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 0.71, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 5, 'name': 'RDB$FOREIGN18', 'nodes': 24, 'total_dup': 15}, + {'avg_data_length': 1.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 8, 'name': 'RDB$FOREIGN19', 'nodes': 24, 'total_dup': 19}, + {'avg_data_length': 6.83, 'depth': 1, 'distribution': 
FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY17', 'nodes': 24, 'total_dup': 0}, + {'avg_data_length': 0.31, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 21, 'name': 'CHANGEX', 'nodes': 49, 'total_dup': 46}, + {'avg_data_length': 0.9, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 2, 'name': 'RDB$FOREIGN21', 'nodes': 49, 'total_dup': 16}, + {'avg_data_length': 18.29, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY20', 'nodes': 49, 'total_dup': 0}, + {'avg_data_length': 0.29, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 28, 'name': 'UPDATERX', 'nodes': 49, 'total_dup': 46}, + {'avg_data_length': 2.55, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 6, 'name': 'NEEDX', 'nodes': 33, 'total_dup': 11}, + {'avg_data_length': 1.85, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 3, 'name': 'QTYX', 'nodes': 33, 'total_dup': 11}, + {'avg_data_length': 0.52, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN25', 'nodes': 33, 'total_dup': 18}, + {'avg_data_length': 0.45, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 5, + 'leaf_buckets': 1, 'max_dup': 7, 'name': 'RDB$FOREIGN26', 'nodes': 33, 'total_dup': 25}, + {'avg_data_length': 4.48, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY24', 'nodes': 33, 'total_dup': 0}, + {'avg_data_length': 0.97, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 14, 'name': 'SALESTATX', 'nodes': 33, 'total_dup': 27}, + {'avg_data_length': 0.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY104', 'nodes': 0, 'total_dup': 0}] + i = 0 + while i < len(db.indices): + self.assertDictEqual(data[i], get_object_data(db.indices[i], ['table']), 'Unexpected output from parser (indices)') + i += 1 + def test_parse25_d(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat25-d.out')) + # Database + data = {'attributes': (0,), 'backup_diff_file': None, 'backup_guid': None, 'bumped_transaction': 1, + 'checksum': 12345, 'completed': None, 'continuation_file': None, 'continuation_files': 0, + 'creation_date': datetime.datetime(2013, 5, 27, 23, 40, 53), 'database_dialect': 3, + 'encrypted_blob_pages': None, 'encrypted_data_pages': None, 'encrypted_index_pages': None, + 'executed': datetime.datetime(2018, 4, 4, 15, 32, 25), 'filename': '/home/fdb/test/fbtest25.fdb', + 'flags': 0, 'generation': 2856, 'gstat_version': 2, 'implementation': None, 'implementation_id': 24, + 'indices': 0, 'last_logical_page': None, 'next_attachment_id': 1071, 'next_header_page': 0, 'next_transaction': 1811, + 'oat': 1811, 'ods_version': '11.2', 'oit': 204, 'ost': 1811, 'page_buffers': 0, 
'page_size': 4096, + 'replay_logging_file': None, 'root_filename': None, 'sequence_number': 0, 'shadow_count': 0, 'sweep_interval': 20000, + 'system_change_number': None, 'tables': 15} + self.assertDictEqual(data, get_object_data(db), 'Unexpected output from parser (database hdr)') + # + self.assertTrue(db.has_table_stats()) + self.assertFalse(db.has_index_stats()) + self.assertFalse(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + self.assertFalse(db.has_system()) + # Tables + data = [{'avg_fill': 86, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=1), 'index_root_page': 210, 'indices': 0, + 'max_versions': None, 'name': 'AR', 'primary_pointer_page': 209, 'table_id': 142, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 15, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 181, 'indices': 0, + 'max_versions': None, 'name': 'COUNTRY', 'primary_pointer_page': 180, 'table_id': 128, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 53, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 189, 'indices': 0, + 'max_versions': None, 'name': 'CUSTOMER', 'primary_pointer_page': 188, 'table_id': 132, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 47, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 185, 'indices': 0, + 'max_versions': None, 'name': 'DEPARTMENT', 'primary_pointer_page': 184, 'table_id': 130, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 44, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 2, 'data_pages': 2, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 187, 'indices': 0, + 'max_versions': None, 'name': 'EMPLOYEE', 'primary_pointer_page': 186, 'table_id': 131, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 20, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 196, 'indices': 0, + 'max_versions': None, 'name': 'EMPLOYEE_PROJECT', 'primary_pointer_page': 195, 'table_id': 135, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 73, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 3, 'data_pages': 3, + 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=2), 'index_root_page': 183, 'indices': 0, + 'max_versions': None, 'name': 'JOB', 'primary_pointer_page': 182, 'table_id': 129, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 29, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_root_page': 194, 'indices': 0, + 'max_versions': None, 'name': 'PROJECT', 'primary_pointer_page': 193, 'table_id': 134, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 80, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': 
FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=1), 'index_root_page': 198, 'indices': 0, + 'max_versions': None, 'name': 'PROJ_DEPT_BUDGET', 'primary_pointer_page': 197, 'table_id': 136, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 58, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 200, 'indices': 0, + 'max_versions': None, 'name': 'SALARY_HISTORY', 'primary_pointer_page': 199, 'table_id': 137, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 68, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 202, 'indices': 0, + 'max_versions': None, 'name': 'SALES', 'primary_pointer_page': 201, 'table_id': 138, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 0, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 282, 'indices': 0, + 'max_versions': None, 'name': 'T', 'primary_pointer_page': 205, 'table_id': 235, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 20, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 208, 'indices': 0, + 'max_versions': None, 'name': 'T2', 'primary_pointer_page': 207, 'table_id': 141, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 0, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 204, 'indices': 0, + 'max_versions': None, 'name': 'T3', 'primary_pointer_page': 203, 'table_id': 139, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 4, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 192, 'indices': 0, + 'max_versions': None, 'name': 'T4', 'primary_pointer_page': 191, 'table_id': 133, 'total_records': None, + 'total_versions': None}] + i = 0 + while i < len(db.tables): + self.assertDictEqual(data[i], get_object_data(db.tables[i]), 'Unexpected output from parser (tables)') + i += 1 + # Indices + self.assertEqual(len(db.indices), 0) + def test_parse25_f(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat25-f.out')) + # + self.assertTrue(db.has_table_stats()) + self.assertTrue(db.has_index_stats()) + self.assertTrue(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + self.assertTrue(db.has_system()) + # Check system tables + data = ['RDB$BACKUP_HISTORY', 'RDB$CHARACTER_SETS', 'RDB$CHECK_CONSTRAINTS', 'RDB$COLLATIONS', 'RDB$DATABASE', 'RDB$DEPENDENCIES', + 'RDB$EXCEPTIONS', 'RDB$FIELDS', 'RDB$FIELD_DIMENSIONS', 'RDB$FILES', 'RDB$FILTERS', 'RDB$FORMATS', 'RDB$FUNCTIONS', + 'RDB$FUNCTION_ARGUMENTS', 'RDB$GENERATORS', 'RDB$INDEX_SEGMENTS', 'RDB$INDICES', 'RDB$LOG_FILES', 'RDB$PAGES', + 'RDB$PROCEDURES', 'RDB$PROCEDURE_PARAMETERS', 'RDB$REF_CONSTRAINTS', 'RDB$RELATIONS', 'RDB$RELATION_CONSTRAINTS', + 'RDB$RELATION_FIELDS', 'RDB$ROLES', 'RDB$SECURITY_CLASSES', 'RDB$TRANSACTIONS', 'RDB$TRIGGERS', 'RDB$TRIGGER_MESSAGES', + 'RDB$TYPES', 
'RDB$USER_PRIVILEGES', 'RDB$VIEW_RELATIONS'] + for table in db.tables: + if table.name.startswith('RDB$'): + self.assertIn(table.name, data) + # check system indices + data = ['RDB$PRIMARY1', 'RDB$FOREIGN23', 'RDB$PRIMARY22', 'RDB$4', 'RDB$FOREIGN10', 'RDB$FOREIGN6', 'RDB$PRIMARY5', 'RDB$FOREIGN8', + 'RDB$FOREIGN9', 'RDB$PRIMARY7', 'RDB$FOREIGN15', 'RDB$FOREIGN16', 'RDB$PRIMARY14', 'RDB$FOREIGN3', 'RDB$PRIMARY2', 'RDB$11', + 'RDB$FOREIGN13', 'RDB$PRIMARY12', 'RDB$FOREIGN18', 'RDB$FOREIGN19', 'RDB$PRIMARY17', 'RDB$INDEX_44', 'RDB$INDEX_19', + 'RDB$INDEX_25', 'RDB$INDEX_14', 'RDB$INDEX_40', 'RDB$INDEX_20', 'RDB$INDEX_26', 'RDB$INDEX_27', 'RDB$INDEX_28', + 'RDB$INDEX_23', 'RDB$INDEX_24', 'RDB$INDEX_2', 'RDB$INDEX_36', 'RDB$INDEX_17', 'RDB$INDEX_45', 'RDB$INDEX_16', 'RDB$INDEX_9', + 'RDB$INDEX_10', 'RDB$INDEX_11', 'RDB$INDEX_46', 'RDB$INDEX_6', 'RDB$INDEX_31', 'RDB$INDEX_41', 'RDB$INDEX_5', 'RDB$INDEX_21', + 'RDB$INDEX_22', 'RDB$INDEX_18', 'RDB$INDEX_47', 'RDB$INDEX_48', 'RDB$INDEX_13', 'RDB$INDEX_0', 'RDB$INDEX_1', 'RDB$INDEX_12', + 'RDB$INDEX_42', 'RDB$INDEX_43', 'RDB$INDEX_15', 'RDB$INDEX_3', 'RDB$INDEX_4', 'RDB$INDEX_39', 'RDB$INDEX_7', 'RDB$INDEX_32', + 'RDB$INDEX_38', 'RDB$INDEX_8', 'RDB$INDEX_35', 'RDB$INDEX_37', 'RDB$INDEX_29', 'RDB$INDEX_30', 'RDB$INDEX_33', 'RDB$INDEX_34', + 'RDB$FOREIGN21', 'RDB$PRIMARY20', 'RDB$FOREIGN25', 'RDB$FOREIGN26', 'RDB$PRIMARY24', 'RDB$PRIMARY104'] + for index in db.indices: + if index.name.startswith('RDB$'): + self.assertIn(index.name, data) + def test_parse25_i(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat25-i.out')) + # + self.assertFalse(db.has_table_stats()) + self.assertTrue(db.has_index_stats()) + self.assertFalse(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + # Tables + data = [{'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 0, 'max_versions': None, 'name': 'AR', + 'primary_pointer_page': None, 'table_id': 142, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 1, 'max_versions': None, 'name': 'COUNTRY', + 'primary_pointer_page': None, 'table_id': 128, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 4, 'max_versions': None, 'name': 'CUSTOMER', + 'primary_pointer_page': None, 'table_id': 132, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 5, 'max_versions': None, 'name': 'DEPARTMENT', + 'primary_pointer_page': None, 'table_id': 130, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 4, 'max_versions': None, 'name': 'EMPLOYEE', + 'primary_pointer_page': None, 'table_id': 131, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 
'index_root_page': None, 'indices': 3, 'max_versions': None, 'name': 'EMPLOYEE_PROJECT', + 'primary_pointer_page': None, 'table_id': 135, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 4, 'max_versions': None, 'name': 'JOB', + 'primary_pointer_page': None, 'table_id': 129, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 4, 'max_versions': None, 'name': 'PROJECT', + 'primary_pointer_page': None, 'table_id': 134, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 3, 'max_versions': None, 'name': 'PROJ_DEPT_BUDGET', + 'primary_pointer_page': None, 'table_id': 136, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 4, 'max_versions': None, 'name': 'SALARY_HISTORY', + 'primary_pointer_page': None, 'table_id': 137, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 6, 'max_versions': None, 'name': 'SALES', + 'primary_pointer_page': None, 'table_id': 138, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 1, 'max_versions': None, 'name': 'T', + 'primary_pointer_page': None, 'table_id': 235, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 0, 'max_versions': None, 'name': 'T2', + 'primary_pointer_page': None, 'table_id': 141, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 0, 'max_versions': None, 'name': 'T3', + 'primary_pointer_page': None, 'table_id': 139, 'total_records': None, 'total_versions': None}, + {'avg_fill': None, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': None, 'data_pages': None, + 'distribution': None, 'index_root_page': None, 'indices': 0, 'max_versions': None, 'name': 'T4', + 'primary_pointer_page': None, 'table_id': 133, 'total_records': None, 'total_versions': None}] + i = 0 + while i < len(db.tables): + self.assertDictEqual(data[i], get_object_data(db.tables[i]), 'Unexpected output from parser (tables)') + i += 1 + # Indices + #data = [] + #for t in db.indices: + #data.append(get_object_data(t, ['table'])) + #pprint(data) + data = [{'avg_data_length': 6.5, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY1', 'nodes': 14, 'total_dup': 0}, 
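+ # Illustrative sketch: the commented-out helper above is one way to regenerate this expected data list after a parser change; parse the sample file, dump each index with the same get_object_data(t, ['table']) call the assertions below use, and pretty-print the result for pasting back into the test: + # from pprint import pprint + # pprint([get_object_data(t, ['table']) for t in db.indices])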
+ {'avg_data_length': 15.87, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'CUSTNAMEX', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 17.27, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'CUSTREGION', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 4.87, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN23', 'nodes': 15, 'total_dup': 4}, + {'avg_data_length': 1.13, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY22', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 5.38, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 3, 'name': 'BUDGETX', 'nodes': 21, 'total_dup': 7}, + {'avg_data_length': 13.95, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$4', 'nodes': 21, 'total_dup': 0}, + {'avg_data_length': 1.14, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, + 'leaf_buckets': 1, 'max_dup': 3, 'name': 'RDB$FOREIGN10', 'nodes': 21, 'total_dup': 3}, + {'avg_data_length': 0.81, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN6', 'nodes': 21, 'total_dup': 13}, + {'avg_data_length': 1.71, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY5', 'nodes': 21, 'total_dup': 0}, + {'avg_data_length': 15.52, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'NAMEX', 'nodes': 42, 'total_dup': 0}, + {'avg_data_length': 0.81, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN8', 'nodes': 42, 'total_dup': 23}, + {'avg_data_length': 6.79, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN9', 'nodes': 42, 'total_dup': 15}, + {'avg_data_length': 1.31, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY7', 'nodes': 42, 'total_dup': 0}, + {'avg_data_length': 1.04, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 2, 'name': 'RDB$FOREIGN15', 'nodes': 28, 'total_dup': 6}, + {'avg_data_length': 0.86, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 9, 'name': 'RDB$FOREIGN16', 'nodes': 28, 'total_dup': 23}, + {'avg_data_length': 9.11, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY14', 'nodes': 28, 'total_dup': 0}, + {'avg_data_length': 10.9, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 
'leaf_buckets': 1, 'max_dup': 1, 'name': 'MAXSALX', 'nodes': 31, 'total_dup': 5}, + {'avg_data_length': 10.29, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 2, 'name': 'MINSALX', 'nodes': 31, 'total_dup': 7}, + {'avg_data_length': 1.39, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 20, 'name': 'RDB$FOREIGN3', 'nodes': 31, 'total_dup': 24}, + {'avg_data_length': 10.45, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY2', 'nodes': 31, 'total_dup': 0}, + {'avg_data_length': 22.5, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'PRODTYPEX', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 13.33, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$11', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 1.33, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$FOREIGN13', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 4.83, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY12', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 0.71, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 5, 'name': 'RDB$FOREIGN18', 'nodes': 24, 'total_dup': 15}, + {'avg_data_length': 1.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 8, 'name': 'RDB$FOREIGN19', 'nodes': 24, 'total_dup': 19}, + {'avg_data_length': 6.83, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY17', 'nodes': 24, 'total_dup': 0}, + {'avg_data_length': 0.31, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 21, 'name': 'CHANGEX', 'nodes': 49, 'total_dup': 46}, + {'avg_data_length': 0.9, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 2, 'name': 'RDB$FOREIGN21', 'nodes': 49, 'total_dup': 16}, + {'avg_data_length': 18.29, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY20', 'nodes': 49, 'total_dup': 0}, + {'avg_data_length': 0.29, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 28, 'name': 'UPDATERX', 'nodes': 49, 'total_dup': 46}, + {'avg_data_length': 2.55, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 6, 'name': 'NEEDX', 'nodes': 33, 'total_dup': 11}, + {'avg_data_length': 1.85, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 3, 'name': 'QTYX', 'nodes': 33, 'total_dup': 11}, + {'avg_data_length': 0.52, 'depth': 1, 'distribution': 
FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN25', 'nodes': 33, 'total_dup': 18}, + {'avg_data_length': 0.45, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 5, + 'leaf_buckets': 1, 'max_dup': 7, 'name': 'RDB$FOREIGN26', 'nodes': 33, 'total_dup': 25}, + {'avg_data_length': 4.48, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY24', 'nodes': 33, 'total_dup': 0}, + {'avg_data_length': 0.97, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 14, 'name': 'SALESTATX', 'nodes': 33, 'total_dup': 27}, + {'avg_data_length': 0.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY104', 'nodes': 0, 'total_dup': 0}] + i = 0 + while i < len(db.indices): + self.assertDictEqual(data[i], get_object_data(db.indices[i], ['table']), 'Unexpected output from parser (indices)') + i += 1 + def test_parse25_r(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat25-r.out')) + # + self.assertTrue(db.has_table_stats()) + self.assertTrue(db.has_index_stats()) + self.assertTrue(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + self.assertFalse(db.has_system()) + # Tables + data = [{'avg_fill': 86, 'avg_record_length': 22.07, 'avg_version_length': 0.0, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=1), 'index_root_page': 210, 'indices': 0, + 'max_versions': 0, 'name': 'AR', 'primary_pointer_page': 209, 'table_id': 142, 'total_records': 15, 'total_versions': 0}, + {'avg_fill': 15, 'avg_record_length': 26.86, 'avg_version_length': 0.0, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 181, 'indices': 1, + 'max_versions': 0, 'name': 'COUNTRY', 'primary_pointer_page': 180, 'table_id': 128, 'total_records': 14, 'total_versions': 0}, + {'avg_fill': 53, 'avg_record_length': 126.47, 'avg_version_length': 0.0, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 189, 'indices': 4, + 'max_versions': 0, 'name': 'CUSTOMER', 'primary_pointer_page': 188, 'table_id': 132, 'total_records': 15, 'total_versions': 0}, + {'avg_fill': 47, 'avg_record_length': 73.62, 'avg_version_length': 0.0, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 185, 'indices': 5, + 'max_versions': 0, 'name': 'DEPARTMENT', 'primary_pointer_page': 184, 'table_id': 130, 'total_records': 21, 'total_versions': 0}, + {'avg_fill': 44, 'avg_record_length': 68.86, 'avg_version_length': 0.0, 'data_page_slots': 2, 'data_pages': 2, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 187, 'indices': 4, + 'max_versions': 0, 'name': 'EMPLOYEE', 'primary_pointer_page': 186, 'table_id': 131, 'total_records': 42, 'total_versions': 0}, + {'avg_fill': 20, 'avg_record_length': 12.0, 'avg_version_length': 0.0, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 196, 'indices': 3, + 'max_versions': 0, 'name': 'EMPLOYEE_PROJECT',
'primary_pointer_page': 195, 'table_id': 135, 'total_records': 28, 'total_versions': 0}, + {'avg_fill': 73, 'avg_record_length': 67.13, 'avg_version_length': 0.0, 'data_page_slots': 3, 'data_pages': 3, + 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=2), 'index_root_page': 183, 'indices': 4, + 'max_versions': 0, 'name': 'JOB', 'primary_pointer_page': 182, 'table_id': 129, 'total_records': 31, 'total_versions': 0}, + {'avg_fill': 29, 'avg_record_length': 48.83, 'avg_version_length': 0.0, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_root_page': 194, 'indices': 4, + 'max_versions': 0, 'name': 'PROJECT', 'primary_pointer_page': 193, 'table_id': 134, 'total_records': 6, 'total_versions': 0}, + {'avg_fill': 80, 'avg_record_length': 30.96, 'avg_version_length': 0.0, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=1), 'index_root_page': 198, 'indices': 3, + 'max_versions': 0, 'name': 'PROJ_DEPT_BUDGET', 'primary_pointer_page': 197, 'table_id': 136, 'total_records': 24, + 'total_versions': 0}, + {'avg_fill': 58, 'avg_record_length': 31.51, 'avg_version_length': 0.0, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 200, 'indices': 4, + 'max_versions': 0, 'name': 'SALARY_HISTORY', 'primary_pointer_page': 199, 'table_id': 137, 'total_records': 49, 'total_versions': 0}, + {'avg_fill': 68, 'avg_record_length': 67.24, 'avg_version_length': 0.0, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 202, 'indices': 6, + 'max_versions': 0, 'name': 'SALES', 'primary_pointer_page': 201, 'table_id': 138, 'total_records': 33, 'total_versions': 0}, + {'avg_fill': 0, 'avg_record_length': 0.0, 'avg_version_length': 0.0, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 282, 'indices': 1, + 'max_versions': 0, 'name': 'T', 'primary_pointer_page': 205, 'table_id': 235, 'total_records': 0, 'total_versions': 0}, + {'avg_fill': 20, 'avg_record_length': 0.0, 'avg_version_length': 17.0, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 208, 'indices': 0, + 'max_versions': 1, 'name': 'T2', 'primary_pointer_page': 207, 'table_id': 141, 'total_records': 2, 'total_versions': 2}, + {'avg_fill': 0, 'avg_record_length': 0.0, 'avg_version_length': 0.0, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 204, 'indices': 0, + 'max_versions': 0, 'name': 'T3', 'primary_pointer_page': 203, 'table_id': 139, 'total_records': 0, 'total_versions': 0}, + {'avg_fill': 4, 'avg_record_length': 0.0, 'avg_version_length': 129.0, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 192, 'indices': 0, + 'max_versions': 1, 'name': 'T4', 'primary_pointer_page': 191, 'table_id': 133, 'total_records': 1, 'total_versions': 1}] + i = 0 + while i < len(db.tables): + self.assertDictEqual(data[i], get_object_data(db.tables[i]), 'Unexpected output from parser (tables)') + i += 1 + # Indices + data = [{'avg_data_length': 6.5, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 
'max_dup': 0, 'name': 'RDB$PRIMARY1', 'nodes': 14, 'total_dup': 0}, + {'avg_data_length': 15.87, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'CUSTNAMEX', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 17.27, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'CUSTREGION', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 4.87, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN23', 'nodes': 15, 'total_dup': 4}, + {'avg_data_length': 1.13, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY22', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 5.38, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 3, 'name': 'BUDGETX', 'nodes': 21, 'total_dup': 7}, + {'avg_data_length': 13.95, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$4', 'nodes': 21, 'total_dup': 0}, + {'avg_data_length': 1.14, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, + 'leaf_buckets': 1, 'max_dup': 3, 'name': 'RDB$FOREIGN10', 'nodes': 21, 'total_dup': 3}, + {'avg_data_length': 0.81, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN6', 'nodes': 21, 'total_dup': 13}, + {'avg_data_length': 1.71, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY5', 'nodes': 21, 'total_dup': 0}, + {'avg_data_length': 15.52, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'NAMEX', 'nodes': 42, 'total_dup': 0}, + {'avg_data_length': 0.81, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN8', 'nodes': 42, 'total_dup': 23}, + {'avg_data_length': 6.79, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN9', 'nodes': 42, 'total_dup': 15}, + {'avg_data_length': 1.31, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY7', 'nodes': 42, 'total_dup': 0}, + {'avg_data_length': 1.04, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 2, 'name': 'RDB$FOREIGN15', 'nodes': 28, 'total_dup': 6}, + {'avg_data_length': 0.86, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 9, 'name': 'RDB$FOREIGN16', 'nodes': 28, 'total_dup': 23}, + {'avg_data_length': 9.11, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY14', 'nodes': 28, 'total_dup': 0}, + {'avg_data_length': 10.9, 'depth': 1, 'distribution': 
FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 1, 'name': 'MAXSALX', 'nodes': 31, 'total_dup': 5}, + {'avg_data_length': 10.29, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 2, 'name': 'MINSALX', 'nodes': 31, 'total_dup': 7}, + {'avg_data_length': 1.39, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 20, 'name': 'RDB$FOREIGN3', 'nodes': 31, 'total_dup': 24}, + {'avg_data_length': 10.45, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY2', 'nodes': 31, 'total_dup': 0}, + {'avg_data_length': 22.5, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'PRODTYPEX', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 13.33, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$11', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 1.33, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$FOREIGN13', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 4.83, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY12', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 0.71, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 5, 'name': 'RDB$FOREIGN18', 'nodes': 24, 'total_dup': 15}, + {'avg_data_length': 1.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 8, 'name': 'RDB$FOREIGN19', 'nodes': 24, 'total_dup': 19}, + {'avg_data_length': 6.83, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY17', 'nodes': 24, 'total_dup': 0}, + {'avg_data_length': 0.31, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 21, 'name': 'CHANGEX', 'nodes': 49, 'total_dup': 46}, + {'avg_data_length': 0.9, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 2, 'name': 'RDB$FOREIGN21', 'nodes': 49, 'total_dup': 16}, + {'avg_data_length': 18.29, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY20', 'nodes': 49, 'total_dup': 0}, + {'avg_data_length': 0.29, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 28, 'name': 'UPDATERX', 'nodes': 49, 'total_dup': 46}, + {'avg_data_length': 2.55, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, + 'leaf_buckets': 1, 'max_dup': 6, 'name': 'NEEDX', 'nodes': 33, 'total_dup': 11}, + {'avg_data_length': 1.85, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, + 'leaf_buckets': 1, 'max_dup': 3, 'name': 'QTYX', 'nodes': 33, 'total_dup': 11}, 
+ {'avg_data_length': 0.52, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, + 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN25', 'nodes': 33, 'total_dup': 18}, + {'avg_data_length': 0.45, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 5, + 'leaf_buckets': 1, 'max_dup': 7, 'name': 'RDB$FOREIGN26', 'nodes': 33, 'total_dup': 25}, + {'avg_data_length': 4.48, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY24', 'nodes': 33, 'total_dup': 0}, + {'avg_data_length': 0.97, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, + 'leaf_buckets': 1, 'max_dup': 14, 'name': 'SALESTATX', 'nodes': 33, 'total_dup': 27}, + {'avg_data_length': 0.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, + 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY104', 'nodes': 0, 'total_dup': 0}] + i = 0 + while i < len(db.indices): + self.assertDictEqual(data[i], get_object_data(db.indices[i], ['table']), 'Unexpected output from parser (indices)') + i += 1 + def test_parse25_s(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat25-s.out')) + # + self.assertTrue(db.has_table_stats()) + self.assertTrue(db.has_index_stats()) + self.assertFalse(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + self.assertTrue(db.has_system()) + # Tables + data = [{'avg_fill': 86, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=1), 'index_root_page': 210, 'indices': 0, + 'max_versions': None, 'name': 'AR', 'primary_pointer_page': 209, 'table_id': 142, 'total_records': None, 'total_versions': None}, + {'avg_fill': 15, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 181, 'indices': 1, + 'max_versions': None, 'name': 'COUNTRY', 'primary_pointer_page': 180, 'table_id': 128, 'total_records': None, 'total_versions': None}, + {'avg_fill': 53, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 189, 'indices': 4, + 'max_versions': None, 'name': 'CUSTOMER', 'primary_pointer_page': 188, 'table_id': 132, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 47, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 185, 'indices': 5, + 'max_versions': None, 'name': 'DEPARTMENT', 'primary_pointer_page': 184, 'table_id': 130, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 44, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 2, 'data_pages': 2, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 187, 'indices': 4, + 'max_versions': None, 'name': 'EMPLOYEE', 'primary_pointer_page': 186, 'table_id': 131, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 20, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0),
'index_root_page': 196, 'indices': 3, + 'max_versions': None, 'name': 'EMPLOYEE_PROJECT', 'primary_pointer_page': 195, 'table_id': 135, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 73, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 3, 'data_pages': 3, + 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=2), 'index_root_page': 183, 'indices': 4, + 'max_versions': None, 'name': 'JOB', 'primary_pointer_page': 182, 'table_id': 129, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 29, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_root_page': 194, 'indices': 4, + 'max_versions': None, 'name': 'PROJECT', 'primary_pointer_page': 193, 'table_id': 134, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 80, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=1), 'index_root_page': 198, 'indices': 3, + 'max_versions': None, 'name': 'PROJ_DEPT_BUDGET', 'primary_pointer_page': 197, 'table_id': 136, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 0, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 69, 'indices': 1, + 'max_versions': None, 'name': 'RDB$BACKUP_HISTORY', 'primary_pointer_page': 68, 'table_id': 32, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 69, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 61, 'indices': 2, + 'max_versions': None, 'name': 'RDB$CHARACTER_SETS', 'primary_pointer_page': 60, 'table_id': 28, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 37, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 2, 'data_pages': 2, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 53, 'indices': 2, + 'max_versions': None, 'name': 'RDB$CHECK_CONSTRAINTS', 'primary_pointer_page': 52, 'table_id': 24, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 55, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 3, 'data_pages': 3, + 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=2, d100=0), 'index_root_page': 63, 'indices': 2, + 'max_versions': None, 'name': 'RDB$COLLATIONS', 'primary_pointer_page': 62, 'table_id': 29, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 1, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 7, 'indices': 0, + 'max_versions': None, 'name': 'RDB$DATABASE', 'primary_pointer_page': 6, 'table_id': 1, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 49, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 6, 'data_pages': 5, + 'distribution': FillDistribution(d20=1, d40=1, d50=1, d80=2, d100=0), 'index_root_page': 31, 'indices': 2, + 'max_versions': None, 'name': 'RDB$DEPENDENCIES', 'primary_pointer_page': 30, 'table_id': 13, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 12, 'avg_record_length': None, 'avg_version_length': 
None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 65, 'indices': 2, + 'max_versions': None, 'name': 'RDB$EXCEPTIONS', 'primary_pointer_page': 64, 'table_id': 30, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 62, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 6, 'data_pages': 6, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=4, d100=1), 'index_root_page': 9, 'indices': 1, + 'max_versions': None, 'name': 'RDB$FIELDS', 'primary_pointer_page': 8, 'table_id': 2, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 19, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 47, 'indices': 1, + 'max_versions': None, 'name': 'RDB$FIELD_DIMENSIONS', 'primary_pointer_page': 46, 'table_id': 21, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 0, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 25, 'indices': 0, + 'max_versions': None, 'name': 'RDB$FILES', 'primary_pointer_page': 24, 'table_id': 10, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 0, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 37, 'indices': 2, + 'max_versions': None, 'name': 'RDB$FILTERS', 'primary_pointer_page': 36, 'table_id': 16, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 76, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 21, 'indices': 1, + 'max_versions': None, 'name': 'RDB$FORMATS', 'primary_pointer_page': 20, 'table_id': 8, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 4, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 33, 'indices': 1, + 'max_versions': None, 'name': 'RDB$FUNCTIONS', 'primary_pointer_page': 32, 'table_id': 14, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 9, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 35, 'indices': 1, + 'max_versions': None, 'name': 'RDB$FUNCTION_ARGUMENTS', 'primary_pointer_page': 34, 'table_id': 15, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 22, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_root_page': 45, 'indices': 2, + 'max_versions': None, 'name': 'RDB$GENERATORS', 'primary_pointer_page': 44, 'table_id': 20, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 79, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 3, 'data_pages': 3, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=2, d100=1), 'index_root_page': 11, 'indices': 1, + 'max_versions': None, 'name': 'RDB$INDEX_SEGMENTS', 'primary_pointer_page': 10, 'table_id': 3, 'total_records': 
None, + 'total_versions': None}, + {'avg_fill': 48, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 4, 'data_pages': 4, + 'distribution': FillDistribution(d20=1, d40=1, d50=0, d80=2, d100=0), 'index_root_page': 13, 'indices': 3, + 'max_versions': None, 'name': 'RDB$INDICES', 'primary_pointer_page': 12, 'table_id': 4, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 0, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 55, 'indices': 0, + 'max_versions': None, 'name': 'RDB$LOG_FILES', 'primary_pointer_page': 54, 'table_id': 25, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 38, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 2, 'data_pages': 2, + 'distribution': FillDistribution(d20=1, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 4, 'indices': 0, + 'max_versions': None, 'name': 'RDB$PAGES', 'primary_pointer_page': 3, 'table_id': 0, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 94, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 3, 'data_pages': 3, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=3), 'index_root_page': 57, 'indices': 2, + 'max_versions': None, 'name': 'RDB$PROCEDURES', 'primary_pointer_page': 56, 'table_id': 26, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 48, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 59, 'indices': 3, + 'max_versions': None, 'name': 'RDB$PROCEDURE_PARAMETERS', 'primary_pointer_page': 58, 'table_id': 27, + 'total_records': None, 'total_versions': None}, + {'avg_fill': 25, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), 'index_root_page': 51, 'indices': 1, + 'max_versions': None, 'name': 'RDB$REF_CONSTRAINTS', 'primary_pointer_page': 50, 'table_id': 23, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 71, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 6, 'data_pages': 5, + 'distribution': FillDistribution(d20=0, d40=0, d50=2, d80=1, d100=2), 'index_root_page': 17, 'indices': 2, + 'max_versions': None, 'name': 'RDB$RELATIONS', 'primary_pointer_page': 16, 'table_id': 6, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 67, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 2, 'data_pages': 2, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=2, d100=0), 'index_root_page': 49, 'indices': 3, + 'max_versions': None, 'name': 'RDB$RELATION_CONSTRAINTS', 'primary_pointer_page': 48, 'table_id': 22, + 'total_records': None, 'total_versions': None}, + {'avg_fill': 77, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 13, 'data_pages': 13, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=9, d100=3), 'index_root_page': 15, 'indices': 3, + 'max_versions': None, 'name': 'RDB$RELATION_FIELDS', 'primary_pointer_page': 14, 'table_id': 5, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 2, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 67, 
'indices': 1, + 'max_versions': None, 'name': 'RDB$ROLES', 'primary_pointer_page': 66, 'table_id': 31, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 77, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 6, 'data_pages': 6, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=1, d100=4), 'index_root_page': 23, 'indices': 1, + 'max_versions': None, 'name': 'RDB$SECURITY_CLASSES', 'primary_pointer_page': 22, 'table_id': 9, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 0, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 43, 'indices': 1, + 'max_versions': None, 'name': 'RDB$TRANSACTIONS', 'primary_pointer_page': 42, 'table_id': 19, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 90, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 7, 'data_pages': 7, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=2, d100=5), 'index_root_page': 29, 'indices': 2, + 'max_versions': None, 'name': 'RDB$TRIGGERS', 'primary_pointer_page': 28, 'table_id': 12, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 68, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 39, 'indices': 1, + 'max_versions': None, 'name': 'RDB$TRIGGER_MESSAGES', 'primary_pointer_page': 38, 'table_id': 17, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 70, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 5, 'data_pages': 5, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=5, d100=0), 'index_root_page': 27, 'indices': 1, + 'max_versions': None, 'name': 'RDB$TYPES', 'primary_pointer_page': 26, 'table_id': 11, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 67, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 4, 'data_pages': 4, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=3, d100=0), 'index_root_page': 41, 'indices': 2, + 'max_versions': None, 'name': 'RDB$USER_PRIVILEGES', 'primary_pointer_page': 40, 'table_id': 18, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 3, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 19, 'indices': 2, + 'max_versions': None, 'name': 'RDB$VIEW_RELATIONS', 'primary_pointer_page': 18, 'table_id': 7, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 58, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), 'index_root_page': 200, 'indices': 4, + 'max_versions': None, 'name': 'SALARY_HISTORY', 'primary_pointer_page': 199, 'table_id': 137, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 68, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=1, d100=0), 'index_root_page': 202, 'indices': 6, + 'max_versions': None, 'name': 'SALES', 'primary_pointer_page': 201, 'table_id': 138, 'total_records': None, + 'total_versions': None}, + {'avg_fill': 0, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 0, 
'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 282, 'indices': 1, + 'max_versions': None, 'name': 'T', 'primary_pointer_page': 205, 'table_id': 235, 'total_records': None, 'total_versions': None}, + {'avg_fill': 20, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 208, 'indices': 0, + 'max_versions': None, 'name': 'T2', 'primary_pointer_page': 207, 'table_id': 141, 'total_records': None, 'total_versions': None}, + {'avg_fill': 0, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 0, 'data_pages': 0, + 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 204, 'indices': 0, + 'max_versions': None, 'name': 'T3', 'primary_pointer_page': 203, 'table_id': 139, 'total_records': None, 'total_versions': None}, + {'avg_fill': 4, 'avg_record_length': None, 'avg_version_length': None, 'data_page_slots': 1, 'data_pages': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_root_page': 192, 'indices': 0, + 'max_versions': None, 'name': 'T4', 'primary_pointer_page': 191, 'table_id': 133, 'total_records': None, 'total_versions': None}] + i = 0 + while i < len(db.tables): + self.assertDictEqual(data[i], get_object_data(db.tables[i]), 'Unexpected output from parser (tables)') + i += 1 + # Indices + data = [{'avg_data_length': 6.5, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY1', 'nodes': 14, 'total_dup': 0}, + {'avg_data_length': 15.87, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'CUSTNAMEX', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 17.27, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'CUSTREGION', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 4.87, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN23', 'nodes': 15, 'total_dup': 4}, + {'avg_data_length': 1.13, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY22', 'nodes': 15, 'total_dup': 0}, + {'avg_data_length': 5.38, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 3, 'name': 'BUDGETX', 'nodes': 21, 'total_dup': 7}, + {'avg_data_length': 13.95, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$4', 'nodes': 21, 'total_dup': 0}, + {'avg_data_length': 1.14, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 4, 'leaf_buckets': 1, 'max_dup': 3, 'name': 'RDB$FOREIGN10', 'nodes': 21, 'total_dup': 3}, + {'avg_data_length': 0.81, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN6', 'nodes': 21, 'total_dup': 13}, + {'avg_data_length': 1.71, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 
'max_dup': 0, 'name': 'RDB$PRIMARY5', 'nodes': 21, 'total_dup': 0}, + {'avg_data_length': 15.52, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'NAMEX', 'nodes': 42, 'total_dup': 0}, + {'avg_data_length': 0.81, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN8', 'nodes': 42, 'total_dup': 23}, + {'avg_data_length': 6.79, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN9', 'nodes': 42, 'total_dup': 15}, + {'avg_data_length': 1.31, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY7', 'nodes': 42, 'total_dup': 0}, + {'avg_data_length': 1.04, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 2, 'name': 'RDB$FOREIGN15', 'nodes': 28, 'total_dup': 6}, + {'avg_data_length': 0.86, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 9, 'name': 'RDB$FOREIGN16', 'nodes': 28, 'total_dup': 23}, + {'avg_data_length': 9.11, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY14', 'nodes': 28, 'total_dup': 0}, + {'avg_data_length': 10.9, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 1, 'name': 'MAXSALX', 'nodes': 31, 'total_dup': 5}, + {'avg_data_length': 10.29, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 2, 'name': 'MINSALX', 'nodes': 31, 'total_dup': 7}, + {'avg_data_length': 1.39, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 20, 'name': 'RDB$FOREIGN3', 'nodes': 31, 'total_dup': 24}, + {'avg_data_length': 10.45, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY2', 'nodes': 31, 'total_dup': 0}, + {'avg_data_length': 22.5, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'PRODTYPEX', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 13.33, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$11', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 1.33, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$FOREIGN13', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 4.83, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY12', 'nodes': 6, 'total_dup': 0}, + {'avg_data_length': 0.71, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 5, 'name': 'RDB$FOREIGN18', 'nodes': 24, 'total_dup': 15}, + {'avg_data_length': 1.0, 'depth': 1, 'distribution': 
FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 8, 'name': 'RDB$FOREIGN19', 'nodes': 24, 'total_dup': 19}, + {'avg_data_length': 6.83, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY17', 'nodes': 24, 'total_dup': 0}, + {'avg_data_length': 0.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_44', 'nodes': 0, 'total_dup': 0}, + {'avg_data_length': 2.98, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_19', 'nodes': 52, 'total_dup': 0}, + {'avg_data_length': 1.04, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_25', 'nodes': 52, 'total_dup': 0}, + {'avg_data_length': 0.9, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 1, 'name': 'RDB$INDEX_14', 'nodes': 70, 'total_dup': 14}, + {'avg_data_length': 3.81, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 2, 'name': 'RDB$INDEX_40', 'nodes': 70, 'total_dup': 11}, + {'avg_data_length': 3.77, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_20', 'nodes': 149, 'total_dup': 0}, + {'avg_data_length': 1.79, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_26', 'nodes': 149, 'total_dup': 0}, + {'avg_data_length': 1.18, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 13, 'name': 'RDB$INDEX_27', 'nodes': 163, 'total_dup': 118}, + {'avg_data_length': 1.01, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 36, 'name': 'RDB$INDEX_28', 'nodes': 163, 'total_dup': 145}, + {'avg_data_length': 14.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_23', 'nodes': 5, 'total_dup': 0}, + {'avg_data_length': 1.2, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_24', 'nodes': 5, 'total_dup': 0}, + {'avg_data_length': 4.58, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_2', 'nodes': 245, 'total_dup': 0}, + {'avg_data_length': 1.26, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 2, 'name': 'RDB$INDEX_36', 'nodes': 19, 'total_dup': 3}, + {'avg_data_length': 0.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_17', 'nodes': 0, 'total_dup': 0}, + {'avg_data_length': 0.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, 'name': 
'RDB$INDEX_45', 'nodes': 0, 'total_dup': 0}, + {'avg_data_length': 4.63, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_16', 'nodes': 19, 'total_dup': 0}, + {'avg_data_length': 13.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_9', 'nodes': 2, 'total_dup': 0}, + {'avg_data_length': 3.71, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 3, 'name': 'RDB$INDEX_10', 'nodes': 7, 'total_dup': 5}, + {'avg_data_length': 11.91, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_11', 'nodes': 11, 'total_dup': 0}, + {'avg_data_length': 1.09, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_46', 'nodes': 11, 'total_dup': 0}, + {'avg_data_length': 1.5, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 2, 'name': 'RDB$INDEX_6', 'nodes': 150, 'total_dup': 24}, + {'avg_data_length': 4.23, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 5, 'name': 'RDB$INDEX_31', 'nodes': 88, 'total_dup': 48}, + {'avg_data_length': 0.19, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 73, 'name': 'RDB$INDEX_41', 'nodes': 88, 'total_dup': 81}, + {'avg_data_length': 2.09, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_5', 'nodes': 88, 'total_dup': 0}, + {'avg_data_length': 10.6, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_21', 'nodes': 10, 'total_dup': 0}, + {'avg_data_length': 1.1, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_22', 'nodes': 10, 'total_dup': 0}, + {'avg_data_length': 11.33, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_18', 'nodes': 33, 'total_dup': 0}, + {'avg_data_length': 1.24, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_47', 'nodes': 33, 'total_dup': 0}, + {'avg_data_length': 0.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 32, 'name': 'RDB$INDEX_48', 'nodes': 33, 'total_dup': 32}, + {'avg_data_length': 1.93, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_13', 'nodes': 14, 'total_dup': 0}, + {'avg_data_length': 8.81, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_0', 'nodes': 58, 'total_dup': 0}, + {'avg_data_length': 0.82, 'depth': 1, 'distribution': FillDistribution(d20=1, 
d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 14, 'name': 'RDB$INDEX_1', 'nodes': 73, 'total_dup': 14}, + {'avg_data_length': 1.07, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_12', 'nodes': 82, 'total_dup': 0}, + {'avg_data_length': 6.43, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 8, 'name': 'RDB$INDEX_42', 'nodes': 82, 'total_dup': 43}, + {'avg_data_length': 0.6, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 54, 'name': 'RDB$INDEX_43', 'nodes': 82, 'total_dup': 54}, + {'avg_data_length': 20.92, 'depth': 2, 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=1, d100=2), + 'index_id': 2, 'leaf_buckets': 4, 'max_dup': 0, 'name': 'RDB$INDEX_15', 'nodes': 466, 'total_dup': 0}, + {'avg_data_length': 2.33, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=1, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 31, 'name': 'RDB$INDEX_3', 'nodes': 466, 'total_dup': 255}, + {'avg_data_length': 1.1, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 27, 'name': 'RDB$INDEX_4', 'nodes': 466, 'total_dup': 408}, + {'avg_data_length': 9.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_39', 'nodes': 2, 'total_dup': 0}, + {'avg_data_length': 1.06, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_7', 'nodes': 182, 'total_dup': 0}, + {'avg_data_length': 0.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_32', 'nodes': 0, 'total_dup': 0}, + {'avg_data_length': 2.84, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 18, 'name': 'RDB$INDEX_38', 'nodes': 69, 'total_dup': 48}, + {'avg_data_length': 2.09, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_8', 'nodes': 69, 'total_dup': 0}, + {'avg_data_length': 1.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 5, 'name': 'RDB$INDEX_35', 'nodes': 36, 'total_dup': 12}, + {'avg_data_length': 4.22, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 1, 'name': 'RDB$INDEX_37', 'nodes': 228, 'total_dup': 16}, + {'avg_data_length': 1.24, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 9, 'name': 'RDB$INDEX_29', 'nodes': 173, 'total_dup': 144}, + {'avg_data_length': 0.07, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 104, 'name': 'RDB$INDEX_30', 'nodes': 173, 'total_dup': 171}, + {'avg_data_length': 5.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 1, 'name': 'RDB$INDEX_33', 'nodes': 
2, 'total_dup': 1}, + {'avg_data_length': 9.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$INDEX_34', 'nodes': 2, 'total_dup': 0}, + {'avg_data_length': 0.31, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 21, 'name': 'CHANGEX', 'nodes': 49, 'total_dup': 46}, + {'avg_data_length': 0.9, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 2, 'name': 'RDB$FOREIGN21', 'nodes': 49, 'total_dup': 16}, + {'avg_data_length': 18.29, 'depth': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY20', 'nodes': 49, 'total_dup': 0}, + {'avg_data_length': 0.29, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 28, 'name': 'UPDATERX', 'nodes': 49, 'total_dup': 46}, + {'avg_data_length': 2.55, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 6, 'name': 'NEEDX', 'nodes': 33, 'total_dup': 11}, + {'avg_data_length': 1.85, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 3, 'name': 'QTYX', 'nodes': 33, 'total_dup': 11}, + {'avg_data_length': 0.52, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 4, 'leaf_buckets': 1, 'max_dup': 4, 'name': 'RDB$FOREIGN25', 'nodes': 33, 'total_dup': 18}, + {'avg_data_length': 0.45, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 5, 'leaf_buckets': 1, 'max_dup': 7, 'name': 'RDB$FOREIGN26', 'nodes': 33, 'total_dup': 25}, + {'avg_data_length': 4.48, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY24', 'nodes': 33, 'total_dup': 0}, + {'avg_data_length': 0.97, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 14, 'name': 'SALESTATX', 'nodes': 33, 'total_dup': 27}, + {'avg_data_length': 0.0, 'depth': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, 'name': 'RDB$PRIMARY104', 'nodes': 0, 'total_dup': 0}] + # Verify every parsed index; the loop must be bounded by len(db.indices), not len(db.tables). + i = 0 + while i < len(db.indices): + self.assertDictEqual(data[i], get_object_data(db.indices[i], ['table']), 'Unexpected output from parser (indices)') + i += 1 + def test_parse30_h(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat30-h.out')) + data = {'attributes': (0,), 'backup_diff_file': None, 'backup_guid': '{F978F787-7023-4C4A-F79D-8D86645B0487}', + 'bumped_transaction': None, 'checksum': 12345, 'completed': datetime.datetime(2018, 4, 4, 15, 41, 34), + 'continuation_file': None, 'continuation_files': 0, 'creation_date': datetime.datetime(2015, 11, 27, 11, 19, 39), + 'database_dialect': 3, 'encrypted_blob_pages': None, 'encrypted_data_pages': None, 'encrypted_index_pages': None, + 'executed': datetime.datetime(2018, 4, 4, 15, 41, 34), 'filename': '/home/fdb/test/FBTEST30.FDB', 'flags': 0, + 'generation': 2176, 'gstat_version': 3, 'implementation': 'HW=AMD/Intel/x64 little-endian OS=Linux CC=gcc', + 'implementation_id': 0, 'indices': 0,
'last_logical_page': None, 'next_attachment_id': 1199, 'next_header_page': 0, + 'next_transaction': 2141, 'oat': 2140, 'ods_version': '12.0', 'oit': 179, 'ost': 2140, 'page_buffers': 0, + 'page_size': 8192, 'replay_logging_file': None, 'root_filename': None, 'sequence_number': 0, 'shadow_count': 0, + 'sweep_interval': None, 'system_change_number': 24, 'tables': 0} + self.assertIsInstance(db, gstat.StatDatabase) + self.assertDictEqual(data, get_object_data(db), 'Unexpected output from parser (database hdr)') + # + self.assertFalse(db.has_table_stats()) + self.assertFalse(db.has_index_stats()) + self.assertFalse(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + self.assertFalse(db.has_system()) + def test_parse30_a(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat30-a.out')) + # Database + data = {'attributes': (0,), 'backup_diff_file': None, 'backup_guid': '{F978F787-7023-4C4A-F79D-8D86645B0487}', + 'bumped_transaction': None, 'checksum': 12345, 'completed': datetime.datetime(2018, 4, 4, 15, 42), + 'continuation_file': None, 'continuation_files': 0, 'creation_date': datetime.datetime(2015, 11, 27, 11, 19, 39), + 'database_dialect': 3, 'encrypted_blob_pages': None, 'encrypted_data_pages': None, 'encrypted_index_pages': None, + 'executed': datetime.datetime(2018, 4, 4, 15, 42), 'filename': '/home/fdb/test/FBTEST30.FDB', 'flags': 0, + 'generation': 2176, 'gstat_version': 3, 'implementation': 'HW=AMD/Intel/x64 little-endian OS=Linux CC=gcc', + 'implementation_id': 0, 'indices': 39, 'last_logical_page': None, 'next_attachment_id': 1199, 'next_header_page': 0, + 'next_transaction': 2141, 'oat': 2140, 'ods_version': '12.0', 'oit': 179, 'ost': 2140, 'page_buffers': 0, + 'page_size': 8192, 'replay_logging_file': None, 'root_filename': None, 'sequence_number': 0, 'shadow_count': 0, + 'sweep_interval': None, 'system_change_number': 24, 'tables': 16} + self.assertDictEqual(data, get_object_data(db), 'Unexpected output from parser (database hdr)') + # + self.assertTrue(db.has_table_stats()) + self.assertTrue(db.has_index_stats()) + self.assertFalse(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + self.assertFalse(db.has_system()) + # Tables + data = [{'avg_fill': 86, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 3, 'data_pages': 3, 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=1, d100=2), + 'empty_pages': 0, 'full_pages': 1, 'index_root_page': 299, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'AR', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 297, 'secondary_pages': 2, 'swept_pages': 0, 'table_id': 140, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 8, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 183, 'indices': 1, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'COUNTRY', 'pointer_pages': 1, 
'primary_pages': 1, + 'primary_pointer_page': 182, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 128, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 26, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 262, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'CUSTOMER', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 261, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 137, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 24, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 199, 'indices': 5, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'DEPARTMENT', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 198, 'secondary_pages': 0, 'swept_pages': 1, 'table_id': 130, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 44, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 213, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'EMPLOYEE', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 212, 'secondary_pages': 0, 'swept_pages': 1, 'table_id': 131, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 10, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 235, 'indices': 3, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'EMPLOYEE_PROJECT', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 234, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 134, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 54, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 
'compression_ratio': None, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=1, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 190, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'JOB', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 189, 'secondary_pages': 1, 'swept_pages': 1, 'table_id': 129, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 7, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=2, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 221, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'PROJECT', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 220, 'secondary_pages': 1, 'swept_pages': 1, 'table_id': 133, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 20, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=1, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 248, 'indices': 3, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'PROJ_DEPT_BUDGET', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 239, 'secondary_pages': 1, 'swept_pages': 0, 'table_id': 135, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 30, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 254, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'SALARY_HISTORY', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 253, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 136, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 35, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 268, 'indices': 6, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'SALES', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 267, 'secondary_pages': 0, 'swept_pages': 0, 
'table_id': 138, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 0, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 0, 'data_pages': 0, 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 324, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'T', 'pointer_pages': 1, 'primary_pages': 0, + 'primary_pointer_page': 323, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 147, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 8, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=2, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 303, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'T2', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 302, 'secondary_pages': 1, 'swept_pages': 0, 'table_id': 142, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 3, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=2, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 306, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'T3', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 305, 'secondary_pages': 1, 'swept_pages': 0, 'table_id': 143, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 3, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 308, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'T4', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 307, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 144, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 0, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 0, 'data_pages': 0, 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), + 
'empty_pages': 0, 'full_pages': 0, 'index_root_page': 316, 'indices': 1, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'T5', 'pointer_pages': 1, 'primary_pages': 0, + 'primary_pointer_page': 315, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 145, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}] + i = 0 + while i < len(db.tables): + self.assertDictEqual(data[i], get_object_data(db.tables[i]), 'Unexpected output from parser (tables)') + i += 1 + # Indices + data = [{'avg_data_length': 6.44, 'avg_key_length': 8.63, 'avg_node_length': 10.44, 'avg_prefix_length': 0.44, + 'clustering_factor': 1.0, 'compression_ratio': 0.8, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY1', 'nodes': 16, 'ratio': 0.06, 'root_page': 186, 'total_dup': 0}, + {'avg_data_length': 15.87, 'avg_key_length': 18.27, 'avg_node_length': 19.87, 'avg_prefix_length': 0.6, + 'clustering_factor': 1.0, 'compression_ratio': 0.9, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'CUSTNAMEX', 'nodes': 15, 'ratio': 0.07, 'root_page': 276, 'total_dup': 0}, + {'avg_data_length': 17.27, 'avg_key_length': 20.2, 'avg_node_length': 21.27, 'avg_prefix_length': 2.33, + 'clustering_factor': 1.0, 'compression_ratio': 0.97, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'CUSTREGION', 'nodes': 15, 'ratio': 0.07, 'root_page': 283, 'total_dup': 0}, + {'avg_data_length': 4.87, 'avg_key_length': 6.93, 'avg_node_length': 8.6, 'avg_prefix_length': 0.87, + 'clustering_factor': 1.0, 'compression_ratio': 0.83, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN23', 'nodes': 15, 'ratio': 0.07, 'root_page': 264, 'total_dup': 4}, + {'avg_data_length': 1.13, 'avg_key_length': 3.13, 'avg_node_length': 4.2, 'avg_prefix_length': 1.87, + 'clustering_factor': 1.0, 'compression_ratio': 0.96, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY22', 'nodes': 15, 'ratio': 0.07, 'root_page': 263, 'total_dup': 0}, + {'avg_data_length': 5.38, 'avg_key_length': 8.0, 'avg_node_length': 9.05, 'avg_prefix_length': 3.62, + 'clustering_factor': 1.0, 'compression_ratio': 1.13, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 3, + 'name': 'BUDGETX', 'nodes': 21, 'ratio': 0.05, 'root_page': 284, 'total_dup': 7}, + {'avg_data_length': 13.95, 'avg_key_length': 16.57, 'avg_node_length': 17.95, 'avg_prefix_length': 5.29, + 'clustering_factor': 1.0, 'compression_ratio': 1.16, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$4', 'nodes': 21, 'ratio': 0.05, 'root_page': 208, 'total_dup': 0}, + {'avg_data_length': 1.14, 'avg_key_length': 3.24, 'avg_node_length': 4.29, 'avg_prefix_length': 0.81, + 'clustering_factor': 1.0, 'compression_ratio': 0.6, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, 'leaf_buckets': 1, 'max_dup': 3, + 'name': 
'RDB$FOREIGN10', 'nodes': 21, 'ratio': 0.05, 'root_page': 219, 'total_dup': 3}, + {'avg_data_length': 0.81, 'avg_key_length': 2.95, 'avg_node_length': 4.1, 'avg_prefix_length': 2.05, + 'clustering_factor': 1.0, 'compression_ratio': 0.97, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN6', 'nodes': 21, 'ratio': 0.05, 'root_page': 210, 'total_dup': 13}, + {'avg_data_length': 1.71, 'avg_key_length': 4.05, 'avg_node_length': 5.24, 'avg_prefix_length': 1.29, + 'clustering_factor': 1.0, 'compression_ratio': 0.74, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY5', 'nodes': 21, 'ratio': 0.05, 'root_page': 209, 'total_dup': 0}, + {'avg_data_length': 15.52, 'avg_key_length': 18.5, 'avg_node_length': 19.52, 'avg_prefix_length': 2.17, + 'clustering_factor': 1.0, 'compression_ratio': 0.96, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'NAMEX', 'nodes': 42, 'ratio': 0.02, 'root_page': 285, 'total_dup': 0}, + {'avg_data_length': 0.81, 'avg_key_length': 2.98, 'avg_node_length': 4.07, 'avg_prefix_length': 2.19, + 'clustering_factor': 1.0, 'compression_ratio': 1.01, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN8', 'nodes': 42, 'ratio': 0.02, 'root_page': 215, 'total_dup': 23}, + {'avg_data_length': 6.79, 'avg_key_length': 9.4, 'avg_node_length': 10.43, 'avg_prefix_length': 9.05, + 'clustering_factor': 1.0, 'compression_ratio': 1.68, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN9', 'nodes': 42, 'ratio': 0.02, 'root_page': 216, 'total_dup': 15}, + {'avg_data_length': 1.31, 'avg_key_length': 3.6, 'avg_node_length': 4.62, 'avg_prefix_length': 1.17, + 'clustering_factor': 1.0, 'compression_ratio': 0.69, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY7', 'nodes': 42, 'ratio': 0.02, 'root_page': 214, 'total_dup': 0}, + {'avg_data_length': 1.04, 'avg_key_length': 3.25, 'avg_node_length': 4.29, 'avg_prefix_length': 1.36, + 'clustering_factor': 1.0, 'compression_ratio': 0.74, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 2, + 'name': 'RDB$FOREIGN15', 'nodes': 28, 'ratio': 0.04, 'root_page': 237, 'total_dup': 6}, + {'avg_data_length': 0.86, 'avg_key_length': 2.89, 'avg_node_length': 4.04, 'avg_prefix_length': 4.14, + 'clustering_factor': 1.0, 'compression_ratio': 1.73, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 9, + 'name': 'RDB$FOREIGN16', 'nodes': 28, 'ratio': 0.04, 'root_page': 238, 'total_dup': 23}, + {'avg_data_length': 9.11, 'avg_key_length': 12.07, 'avg_node_length': 13.11, 'avg_prefix_length': 2.89, + 'clustering_factor': 1.0, 'compression_ratio': 0.99, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY14', 'nodes': 28, 'ratio': 0.04, 'root_page': 236, 'total_dup': 0}, + {'avg_data_length': 10.9, 'avg_key_length': 13.71, 
'avg_node_length': 14.74, 'avg_prefix_length': 7.87, + 'clustering_factor': 1.0, 'compression_ratio': 1.37, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 1, + 'name': 'MAXSALX', 'nodes': 31, 'ratio': 0.03, 'root_page': 286, 'total_dup': 5}, + {'avg_data_length': 10.29, 'avg_key_length': 13.03, 'avg_node_length': 14.06, 'avg_prefix_length': 8.48, + 'clustering_factor': 1.0, 'compression_ratio': 1.44, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 2, + 'name': 'MINSALX', 'nodes': 31, 'ratio': 0.03, 'root_page': 287, 'total_dup': 7}, + {'avg_data_length': 1.39, 'avg_key_length': 3.39, 'avg_node_length': 4.61, 'avg_prefix_length': 2.77, + 'clustering_factor': 1.0, 'compression_ratio': 1.23, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 20, + 'name': 'RDB$FOREIGN3', 'nodes': 31, 'ratio': 0.03, 'root_page': 192, 'total_dup': 24}, + {'avg_data_length': 10.45, 'avg_key_length': 13.42, 'avg_node_length': 14.45, 'avg_prefix_length': 6.19, + 'clustering_factor': 1.0, 'compression_ratio': 1.24, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY2', 'nodes': 31, 'ratio': 0.03, 'root_page': 191, 'total_dup': 0}, + {'avg_data_length': 22.5, 'avg_key_length': 25.33, 'avg_node_length': 26.5, 'avg_prefix_length': 4.17, + 'clustering_factor': 1.0, 'compression_ratio': 1.05, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'PRODTYPEX', 'nodes': 6, 'ratio': 0.17, 'root_page': 288, 'total_dup': 0}, + {'avg_data_length': 13.33, 'avg_key_length': 15.5, 'avg_node_length': 17.33, 'avg_prefix_length': 0.33, + 'clustering_factor': 1.0, 'compression_ratio': 0.88, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$11', 'nodes': 6, 'ratio': 0.17, 'root_page': 222, 'total_dup': 0}, + {'avg_data_length': 1.33, 'avg_key_length': 3.5, 'avg_node_length': 4.67, 'avg_prefix_length': 0.67, + 'clustering_factor': 1.0, 'compression_ratio': 0.57, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$FOREIGN13', 'nodes': 6, 'ratio': 0.17, 'root_page': 232, 'total_dup': 0}, + {'avg_data_length': 4.83, 'avg_key_length': 7.0, 'avg_node_length': 8.83, 'avg_prefix_length': 0.17, + 'clustering_factor': 1.0, 'compression_ratio': 0.71, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY12', 'nodes': 6, 'ratio': 0.17, 'root_page': 223, 'total_dup': 0}, + {'avg_data_length': 0.71, 'avg_key_length': 2.79, 'avg_node_length': 3.92, 'avg_prefix_length': 2.29, + 'clustering_factor': 1.0, 'compression_ratio': 1.07, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 5, + 'name': 'RDB$FOREIGN18', 'nodes': 24, 'ratio': 0.04, 'root_page': 250, 'total_dup': 15}, + {'avg_data_length': 1.0, 'avg_key_length': 3.04, 'avg_node_length': 4.21, 'avg_prefix_length': 4.0, + 'clustering_factor': 1.0, 'compression_ratio': 1.64, 'depth': 1, + 'distribution': 
FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 8, + 'name': 'RDB$FOREIGN19', 'nodes': 24, 'ratio': 0.04, 'root_page': 251, 'total_dup': 19}, + {'avg_data_length': 6.83, 'avg_key_length': 9.67, 'avg_node_length': 10.71, 'avg_prefix_length': 12.17, + 'clustering_factor': 1.0, 'compression_ratio': 1.97, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY17', 'nodes': 24, 'ratio': 0.04, 'root_page': 249, 'total_dup': 0}, + {'avg_data_length': 0.31, 'avg_key_length': 2.35, 'avg_node_length': 3.37, 'avg_prefix_length': 6.69, + 'clustering_factor': 1.0, 'compression_ratio': 2.98, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 21, + 'name': 'CHANGEX', 'nodes': 49, 'ratio': 0.02, 'root_page': 289, 'total_dup': 46}, + {'avg_data_length': 0.9, 'avg_key_length': 3.1, 'avg_node_length': 4.12, 'avg_prefix_length': 1.43, + 'clustering_factor': 1.0, 'compression_ratio': 0.75, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 2, + 'name': 'RDB$FOREIGN21', 'nodes': 49, 'ratio': 0.02, 'root_page': 256, 'total_dup': 16}, + {'avg_data_length': 18.29, 'avg_key_length': 21.27, 'avg_node_length': 22.29, 'avg_prefix_length': 4.31, + 'clustering_factor': 1.0, 'compression_ratio': 1.06, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY20', 'nodes': 49, 'ratio': 0.02, 'root_page': 255, 'total_dup': 0}, + {'avg_data_length': 0.29, 'avg_key_length': 2.29, 'avg_node_length': 3.35, 'avg_prefix_length': 5.39, + 'clustering_factor': 1.0, 'compression_ratio': 2.48, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 28, + 'name': 'UPDATERX', 'nodes': 49, 'ratio': 0.02, 'root_page': 290, 'total_dup': 46}, + {'avg_data_length': 2.55, 'avg_key_length': 4.94, 'avg_node_length': 5.97, 'avg_prefix_length': 2.88, + 'clustering_factor': 1.0, 'compression_ratio': 1.1, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 6, + 'name': 'NEEDX', 'nodes': 33, 'ratio': 0.03, 'root_page': 291, 'total_dup': 11}, + {'avg_data_length': 1.85, 'avg_key_length': 4.03, 'avg_node_length': 5.06, 'avg_prefix_length': 11.18, + 'clustering_factor': 1.0, 'compression_ratio': 3.23, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, 'leaf_buckets': 1, 'max_dup': 3, + 'name': 'QTYX', 'nodes': 33, 'ratio': 0.03, 'root_page': 292, 'total_dup': 11}, + {'avg_data_length': 0.52, 'avg_key_length': 2.52, 'avg_node_length': 3.55, 'avg_prefix_length': 2.48, + 'clustering_factor': 1.0, 'compression_ratio': 1.19, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN25', 'nodes': 33, 'ratio': 0.03, 'root_page': 270, 'total_dup': 18}, + {'avg_data_length': 0.45, 'avg_key_length': 2.64, 'avg_node_length': 3.67, 'avg_prefix_length': 2.21, + 'clustering_factor': 1.0, 'compression_ratio': 1.01, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 7, + 'name': 'RDB$FOREIGN26', 'nodes': 33, 
'ratio': 0.03, 'root_page': 271, 'total_dup': 25}, + {'avg_data_length': 4.48, 'avg_key_length': 7.42, 'avg_node_length': 8.45, 'avg_prefix_length': 3.52, + 'clustering_factor': 1.0, 'compression_ratio': 1.08, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY24', 'nodes': 33, 'ratio': 0.03, 'root_page': 269, 'total_dup': 0}, + {'avg_data_length': 0.97, 'avg_key_length': 3.03, 'avg_node_length': 4.06, 'avg_prefix_length': 9.82, + 'clustering_factor': 1.0, 'compression_ratio': 3.56, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 5, 'leaf_buckets': 1, 'max_dup': 14, + 'name': 'SALESTATX', 'nodes': 33, 'ratio': 0.03, 'root_page': 293, 'total_dup': 27}, + {'avg_data_length': 0.0, 'avg_key_length': 0.0, 'avg_node_length': 0.0, 'avg_prefix_length': 0.0, + 'clustering_factor': 0.0, 'compression_ratio': 0.0, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY28', 'nodes': 0, 'ratio': 0.0, 'root_page': 317, 'total_dup': 0}] + # The header above reports 16 tables but 39 indices, and data has 39 entries, + # so the loop must be bounded by len(db.indices), not len(db.tables). + i = 0 + while i < len(db.indices): + self.assertDictEqual(data[i], get_object_data(db.indices[i], ['table']), 'Unexpected output from parser (indices)') + i += 1 + def test_parse30_d(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat30-d.out')) + # + self.assertTrue(db.has_table_stats()) + self.assertFalse(db.has_index_stats()) + self.assertFalse(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + self.assertFalse(db.has_system()) + # Tables + data = [{'avg_fill': 86, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 3, 'data_pages': 3, 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=1, d100=2), + 'empty_pages': 0, 'full_pages': 1, 'index_root_page': 299, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'AR', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 297, 'secondary_pages': 2, 'swept_pages': 0, 'table_id': 140, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 8, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 183, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'COUNTRY', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 182, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 128, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 26, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), +
'empty_pages': 0, 'full_pages': 0, 'index_root_page': 262, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'CUSTOMER', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 261, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 137, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 24, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 199, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'DEPARTMENT', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 198, 'secondary_pages': 0, 'swept_pages': 1, 'table_id': 130, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 44, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 213, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'EMPLOYEE', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 212, 'secondary_pages': 0, 'swept_pages': 1, 'table_id': 131, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 10, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 235, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'EMPLOYEE_PROJECT', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 234, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 134, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 54, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=1, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 190, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'JOB', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 189, 'secondary_pages': 1, 'swept_pages': 1, 'table_id': 129, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + 
{'avg_fill': 7, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=2, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 221, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'PROJECT', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 220, 'secondary_pages': 1, 'swept_pages': 1, 'table_id': 133, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 20, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=1, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 248, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'PROJ_DEPT_BUDGET', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 239, 'secondary_pages': 1, 'swept_pages': 0, 'table_id': 135, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 30, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 254, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'SALARY_HISTORY', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 253, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 136, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 35, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 268, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'SALES', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 267, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 138, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 0, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 0, 'data_pages': 0, 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 324, 'indices': 0, 'level_0': None, 'level_1': None, 
'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'T', 'pointer_pages': 1, 'primary_pages': 0, + 'primary_pointer_page': 323, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 147, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 8, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=2, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 303, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'T2', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 302, 'secondary_pages': 1, 'swept_pages': 0, 'table_id': 142, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 3, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=2, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 306, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'T3', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 305, 'secondary_pages': 1, 'swept_pages': 0, 'table_id': 143, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 3, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 308, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'T4', 'pointer_pages': 1, 'primary_pages': 1, + 'primary_pointer_page': 307, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 144, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': 0, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': 0, 'data_pages': 0, 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 316, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': None, 'max_versions': None, 'name': 'T5', 'pointer_pages': 1, 'primary_pages': 0, + 'primary_pointer_page': 315, 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 145, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}] + i = 0 + while i < len(db.tables): + self.assertDictEqual(data[i], get_object_data(db.tables[i]), 'Unexpected output from parser (tables)') + i += 1 + # 
Indices + self.assertEqual(len(db.indices), 0) + def test_parse30_e(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat30-e.out')) + data = {'attributes': (0,), 'backup_diff_file': None, 'backup_guid': '{F978F787-7023-4C4A-F79D-8D86645B0487}', + 'bumped_transaction': None, 'checksum': 12345, 'completed': datetime.datetime(2018, 4, 4, 15, 45, 6), + 'continuation_file': None, 'continuation_files': 0, 'creation_date': datetime.datetime(2015, 11, 27, 11, 19, 39), + 'database_dialect': 3, 'encrypted_blob_pages': Encryption(pages=11, encrypted=0, unencrypted=11), + 'encrypted_data_pages': Encryption(pages=121, encrypted=0, unencrypted=121), + 'encrypted_index_pages': Encryption(pages=96, encrypted=0, unencrypted=96), + 'executed': datetime.datetime(2018, 4, 4, 15, 45, 6), 'filename': '/home/fdb/test/FBTEST30.FDB', 'flags': 0, + 'generation': 2181, 'gstat_version': 3, 'implementation': 'HW=AMD/Intel/x64 little-endian OS=Linux CC=gcc', + 'implementation_id': 0, 'indices': 0, 'last_logical_page': None, 'next_attachment_id': 1214, + 'next_header_page': 0, 'next_transaction': 2146, 'oat': 2146, 'ods_version': '12.0', 'oit': 179, 'ost': 2146, + 'page_buffers': 0, 'page_size': 8192, 'replay_logging_file': None, 'root_filename': None, 'sequence_number': 0, + 'shadow_count': 0, 'sweep_interval': None, 'system_change_number': 24, 'tables': 0} + self.assertIsInstance(db, gstat.StatDatabase) + self.assertDictEqual(data, get_object_data(db), 'Unexpected output from parser (database hdr)') + # + self.assertFalse(db.has_table_stats()) + self.assertFalse(db.has_index_stats()) + self.assertFalse(db.has_row_stats()) + self.assertTrue(db.has_encryption_stats()) + self.assertFalse(db.has_system()) + def test_parse30_f(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat30-f.out')) + # + self.assertTrue(db.has_table_stats()) + self.assertTrue(db.has_index_stats()) + self.assertTrue(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + self.assertTrue(db.has_system()) + def test_parse30_i(self): + db = self._parse_file(os.path.join(self.dbpath, 'gstat30-i.out')) + # + self.assertFalse(db.has_table_stats()) + self.assertTrue(db.has_index_stats()) + self.assertFalse(db.has_row_stats()) + self.assertFalse(db.has_encryption_stats()) + # Tables + data = [{'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'AR', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 140, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 1, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'COUNTRY', 
'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 128, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'CUSTOMER', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 137, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 5, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'DEPARTMENT', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 130, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'EMPLOYEE', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 131, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 3, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'EMPLOYEE_PROJECT', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 134, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 
'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'JOB', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 129, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'PROJECT', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 133, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 3, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'PROJ_DEPT_BUDGET', 'pointer_pages': None, 'primary_pages': None, + 'primary_pointer_page': None, 'secondary_pages': None, 'swept_pages': None, 'table_id': 135, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'SALARY_HISTORY', 'pointer_pages': None, 'primary_pages': None, + 'primary_pointer_page': None, 'secondary_pages': None, 'swept_pages': None, 'table_id': 136, 'total_formats': None, + 'total_fragments': None, 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 6, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'SALES', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 138, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 
'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'T', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 147, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'T2', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 142, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'T3', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 143, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'T4', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 'secondary_pages': None, 'swept_pages': None, 'table_id': 144, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}, + {'avg_fill': None, 'avg_fragment_length': None, 'avg_record_length': None, 'avg_unpacked_length': None, + 'avg_version_length': None, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': None, + 'data_page_slots': None, 'data_pages': None, 'distribution': None, 'empty_pages': None, 'full_pages': None, + 'index_root_page': None, 'indices': 1, 'level_0': None, 'level_1': None, 'level_2': None, 'max_fragments': None, + 'max_versions': None, 'name': 'T5', 'pointer_pages': None, 'primary_pages': None, 'primary_pointer_page': None, + 
'secondary_pages': None, 'swept_pages': None, 'table_id': 145, 'total_formats': None, 'total_fragments': None, + 'total_records': None, 'total_versions': None, 'used_formats': None}] + i = 0 + while i < len(db.tables): + self.assertDictEqual(data[i], get_object_data(db.tables[i]), 'Unexpected output from parser (tables)') + i += 1 + # Indices + data = [{'avg_data_length': 6.44, 'avg_key_length': 8.63, 'avg_node_length': 10.44, 'avg_prefix_length': 0.44, + 'clustering_factor': 1.0, 'compression_ratio': 0.8, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY1', 'nodes': 16, 'ratio': 0.06, 'root_page': 186, 'total_dup': 0}, + {'avg_data_length': 15.87, 'avg_key_length': 18.27, 'avg_node_length': 19.87, 'avg_prefix_length': 0.6, + 'clustering_factor': 1.0, 'compression_ratio': 0.9, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'CUSTNAMEX', 'nodes': 15, 'ratio': 0.07, 'root_page': 276, 'total_dup': 0}, + {'avg_data_length': 17.27, 'avg_key_length': 20.2, 'avg_node_length': 21.27, 'avg_prefix_length': 2.33, + 'clustering_factor': 1.0, 'compression_ratio': 0.97, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'CUSTREGION', 'nodes': 15, 'ratio': 0.07, 'root_page': 283, 'total_dup': 0}, + {'avg_data_length': 4.87, 'avg_key_length': 6.93, 'avg_node_length': 8.6, 'avg_prefix_length': 0.87, + 'clustering_factor': 1.0, 'compression_ratio': 0.83, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN23', 'nodes': 15, 'ratio': 0.07, 'root_page': 264, 'total_dup': 4}, + {'avg_data_length': 1.13, 'avg_key_length': 3.13, 'avg_node_length': 4.2, 'avg_prefix_length': 1.87, + 'clustering_factor': 1.0, 'compression_ratio': 0.96, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY22', 'nodes': 15, 'ratio': 0.07, 'root_page': 263, 'total_dup': 0}, + {'avg_data_length': 5.38, 'avg_key_length': 8.0, 'avg_node_length': 9.05, 'avg_prefix_length': 3.62, + 'clustering_factor': 1.0, 'compression_ratio': 1.13, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 3, + 'name': 'BUDGETX', 'nodes': 21, 'ratio': 0.05, 'root_page': 284, 'total_dup': 7}, + {'avg_data_length': 13.95, 'avg_key_length': 16.57, 'avg_node_length': 17.95, 'avg_prefix_length': 5.29, + 'clustering_factor': 1.0, 'compression_ratio': 1.16, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$4', 'nodes': 21, 'ratio': 0.05, 'root_page': 208, 'total_dup': 0}, + {'avg_data_length': 1.14, 'avg_key_length': 3.24, 'avg_node_length': 4.29, 'avg_prefix_length': 0.81, + 'clustering_factor': 1.0, 'compression_ratio': 0.6, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, 'leaf_buckets': 1, 'max_dup': 3, + 'name': 'RDB$FOREIGN10', 'nodes': 21, 'ratio': 0.05, 'root_page': 219, 'total_dup': 3}, + {'avg_data_length': 0.81, 'avg_key_length': 2.95, 'avg_node_length': 4.1, 'avg_prefix_length': 2.05, + 'clustering_factor': 1.0, 'compression_ratio': 0.97, 'depth': 1, + 
'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN6', 'nodes': 21, 'ratio': 0.05, 'root_page': 210, 'total_dup': 13}, + {'avg_data_length': 1.71, 'avg_key_length': 4.05, 'avg_node_length': 5.24, 'avg_prefix_length': 1.29, + 'clustering_factor': 1.0, 'compression_ratio': 0.74, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY5', 'nodes': 21, 'ratio': 0.05, 'root_page': 209, 'total_dup': 0}, + {'avg_data_length': 15.52, 'avg_key_length': 18.5, 'avg_node_length': 19.52, 'avg_prefix_length': 2.17, + 'clustering_factor': 1.0, 'compression_ratio': 0.96, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'NAMEX', 'nodes': 42, 'ratio': 0.02, 'root_page': 285, 'total_dup': 0}, + {'avg_data_length': 0.81, 'avg_key_length': 2.98, 'avg_node_length': 4.07, 'avg_prefix_length': 2.19, + 'clustering_factor': 1.0, 'compression_ratio': 1.01, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN8', 'nodes': 42, 'ratio': 0.02, 'root_page': 215, 'total_dup': 23}, + {'avg_data_length': 6.79, 'avg_key_length': 9.4, 'avg_node_length': 10.43, 'avg_prefix_length': 9.05, + 'clustering_factor': 1.0, 'compression_ratio': 1.68, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN9', 'nodes': 42, 'ratio': 0.02, 'root_page': 216, 'total_dup': 15}, + {'avg_data_length': 1.31, 'avg_key_length': 3.6, 'avg_node_length': 4.62, 'avg_prefix_length': 1.17, + 'clustering_factor': 1.0, 'compression_ratio': 0.69, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY7', 'nodes': 42, 'ratio': 0.02, 'root_page': 214, 'total_dup': 0}, + {'avg_data_length': 1.04, 'avg_key_length': 3.25, 'avg_node_length': 4.29, 'avg_prefix_length': 1.36, + 'clustering_factor': 1.0, 'compression_ratio': 0.74, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 2, + 'name': 'RDB$FOREIGN15', 'nodes': 28, 'ratio': 0.04, 'root_page': 237, 'total_dup': 6}, + {'avg_data_length': 0.86, 'avg_key_length': 2.89, 'avg_node_length': 4.04, 'avg_prefix_length': 4.14, + 'clustering_factor': 1.0, 'compression_ratio': 1.73, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 9, + 'name': 'RDB$FOREIGN16', 'nodes': 28, 'ratio': 0.04, 'root_page': 238, 'total_dup': 23}, + {'avg_data_length': 9.11, 'avg_key_length': 12.07, 'avg_node_length': 13.11, 'avg_prefix_length': 2.89, + 'clustering_factor': 1.0, 'compression_ratio': 0.99, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY14', 'nodes': 28, 'ratio': 0.04, 'root_page': 236, 'total_dup': 0}, + {'avg_data_length': 10.9, 'avg_key_length': 13.71, 'avg_node_length': 14.74, 'avg_prefix_length': 7.87, + 'clustering_factor': 1.0, 'compression_ratio': 1.37, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 1, + 'name': 
'MAXSALX', 'nodes': 31, 'ratio': 0.03, 'root_page': 286, 'total_dup': 5}, + {'avg_data_length': 10.29, 'avg_key_length': 13.03, 'avg_node_length': 14.06, 'avg_prefix_length': 8.48, + 'clustering_factor': 1.0, 'compression_ratio': 1.44, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 2, + 'name': 'MINSALX', 'nodes': 31, 'ratio': 0.03, 'root_page': 287, 'total_dup': 7}, + {'avg_data_length': 1.39, 'avg_key_length': 3.39, 'avg_node_length': 4.61, 'avg_prefix_length': 2.77, + 'clustering_factor': 1.0, 'compression_ratio': 1.23, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 20, + 'name': 'RDB$FOREIGN3', 'nodes': 31, 'ratio': 0.03, 'root_page': 192, 'total_dup': 24}, + {'avg_data_length': 10.45, 'avg_key_length': 13.42, 'avg_node_length': 14.45, 'avg_prefix_length': 6.19, + 'clustering_factor': 1.0, 'compression_ratio': 1.24, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY2', 'nodes': 31, 'ratio': 0.03, 'root_page': 191, 'total_dup': 0}, + {'avg_data_length': 22.5, 'avg_key_length': 25.33, 'avg_node_length': 26.5, 'avg_prefix_length': 4.17, + 'clustering_factor': 1.0, 'compression_ratio': 1.05, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'PRODTYPEX', 'nodes': 6, 'ratio': 0.17, 'root_page': 288, 'total_dup': 0}, + {'avg_data_length': 13.33, 'avg_key_length': 15.5, 'avg_node_length': 17.33, 'avg_prefix_length': 0.33, + 'clustering_factor': 1.0, 'compression_ratio': 0.88, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$11', 'nodes': 6, 'ratio': 0.17, 'root_page': 222, 'total_dup': 0}, + {'avg_data_length': 1.33, 'avg_key_length': 3.5, 'avg_node_length': 4.67, 'avg_prefix_length': 0.67, + 'clustering_factor': 1.0, 'compression_ratio': 0.57, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$FOREIGN13', 'nodes': 6, 'ratio': 0.17, 'root_page': 232, 'total_dup': 0}, + {'avg_data_length': 4.83, 'avg_key_length': 7.0, 'avg_node_length': 8.83, 'avg_prefix_length': 0.17, + 'clustering_factor': 1.0, 'compression_ratio': 0.71, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY12', 'nodes': 6, 'ratio': 0.17, 'root_page': 223, 'total_dup': 0}, + {'avg_data_length': 0.71, 'avg_key_length': 2.79, 'avg_node_length': 3.92, 'avg_prefix_length': 2.29, + 'clustering_factor': 1.0, 'compression_ratio': 1.07, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 5, + 'name': 'RDB$FOREIGN18', 'nodes': 24, 'ratio': 0.04, 'root_page': 250, 'total_dup': 15}, + {'avg_data_length': 1.0, 'avg_key_length': 3.04, 'avg_node_length': 4.21, 'avg_prefix_length': 4.0, + 'clustering_factor': 1.0, 'compression_ratio': 1.64, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 8, + 'name': 'RDB$FOREIGN19', 'nodes': 24, 'ratio': 0.04, 'root_page': 251, 'total_dup': 19}, + {'avg_data_length': 6.83, 'avg_key_length': 9.67, 
'avg_node_length': 10.71, 'avg_prefix_length': 12.17, + 'clustering_factor': 1.0, 'compression_ratio': 1.97, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY17', 'nodes': 24, 'ratio': 0.04, 'root_page': 249, 'total_dup': 0}, + {'avg_data_length': 0.31, 'avg_key_length': 2.35, 'avg_node_length': 3.37, 'avg_prefix_length': 6.69, + 'clustering_factor': 1.0, 'compression_ratio': 2.98, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 21, + 'name': 'CHANGEX', 'nodes': 49, 'ratio': 0.02, 'root_page': 289, 'total_dup': 46}, + {'avg_data_length': 0.9, 'avg_key_length': 3.1, 'avg_node_length': 4.12, 'avg_prefix_length': 1.43, + 'clustering_factor': 1.0, 'compression_ratio': 0.75, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 2, + 'name': 'RDB$FOREIGN21', 'nodes': 49, 'ratio': 0.02, 'root_page': 256, 'total_dup': 16}, + {'avg_data_length': 18.29, 'avg_key_length': 21.27, 'avg_node_length': 22.29, 'avg_prefix_length': 4.31, + 'clustering_factor': 1.0, 'compression_ratio': 1.06, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY20', 'nodes': 49, 'ratio': 0.02, 'root_page': 255, 'total_dup': 0}, + {'avg_data_length': 0.29, 'avg_key_length': 2.29, 'avg_node_length': 3.35, 'avg_prefix_length': 5.39, + 'clustering_factor': 1.0, 'compression_ratio': 2.48, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 28, + 'name': 'UPDATERX', 'nodes': 49, 'ratio': 0.02, 'root_page': 290, 'total_dup': 46}, + {'avg_data_length': 2.55, 'avg_key_length': 4.94, 'avg_node_length': 5.97, 'avg_prefix_length': 2.88, + 'clustering_factor': 1.0, 'compression_ratio': 1.1, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 6, + 'name': 'NEEDX', 'nodes': 33, 'ratio': 0.03, 'root_page': 291, 'total_dup': 11}, + {'avg_data_length': 1.85, 'avg_key_length': 4.03, 'avg_node_length': 5.06, 'avg_prefix_length': 11.18, + 'clustering_factor': 1.0, 'compression_ratio': 3.23, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, 'leaf_buckets': 1, 'max_dup': 3, + 'name': 'QTYX', 'nodes': 33, 'ratio': 0.03, 'root_page': 292, 'total_dup': 11}, + {'avg_data_length': 0.52, 'avg_key_length': 2.52, 'avg_node_length': 3.55, 'avg_prefix_length': 2.48, + 'clustering_factor': 1.0, 'compression_ratio': 1.19, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN25', 'nodes': 33, 'ratio': 0.03, 'root_page': 270, 'total_dup': 18}, + {'avg_data_length': 0.45, 'avg_key_length': 2.64, 'avg_node_length': 3.67, 'avg_prefix_length': 2.21, + 'clustering_factor': 1.0, 'compression_ratio': 1.01, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 7, + 'name': 'RDB$FOREIGN26', 'nodes': 33, 'ratio': 0.03, 'root_page': 271, 'total_dup': 25}, + {'avg_data_length': 4.48, 'avg_key_length': 7.42, 'avg_node_length': 8.45, 'avg_prefix_length': 3.52, + 'clustering_factor': 1.0, 'compression_ratio': 1.08, 'depth': 1, + 'distribution': 
FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0,
+                 'name': 'RDB$PRIMARY24', 'nodes': 33, 'ratio': 0.03, 'root_page': 269, 'total_dup': 0},
+                {'avg_data_length': 0.97, 'avg_key_length': 3.03, 'avg_node_length': 4.06, 'avg_prefix_length': 9.82,
+                 'clustering_factor': 1.0, 'compression_ratio': 3.56, 'depth': 1,
+                 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 5, 'leaf_buckets': 1, 'max_dup': 14,
+                 'name': 'SALESTATX', 'nodes': 33, 'ratio': 0.03, 'root_page': 293, 'total_dup': 27},
+                {'avg_data_length': 0.0, 'avg_key_length': 0.0, 'avg_node_length': 0.0, 'avg_prefix_length': 0.0,
+                 'clustering_factor': 0.0, 'compression_ratio': 0.0, 'depth': 1,
+                 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0,
+                 'name': 'RDB$PRIMARY28', 'nodes': 0, 'ratio': 0.0, 'root_page': 317, 'total_dup': 0}]
+        i = 0
+        while i < len(db.indices):
+            self.assertDictEqual(data[i], get_object_data(db.indices[i], ['table']), 'Unexpected output from parser (indices)')
+            i += 1
+    def test_parse30_r(self):
+        db = self._parse_file(os.path.join(self.dbpath, 'gstat30-r.out'))
+        #
+        self.assertTrue(db.has_table_stats())
+        self.assertTrue(db.has_index_stats())
+        self.assertTrue(db.has_row_stats())
+        self.assertFalse(db.has_encryption_stats())
+        self.assertFalse(db.has_system())
+        # Tables
+        data = [{'avg_fill': 86, 'avg_fragment_length': 0.0, 'avg_record_length': 2.79, 'avg_unpacked_length': 120.0,
+                 'avg_version_length': 16.61, 'blob_pages': 0, 'blobs': 125, 'blobs_total_length': 11237, 'compression_ratio': 42.99,
+                 'data_page_slots': 3, 'data_pages': 3, 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=1, d100=2),
+                 'empty_pages': 0, 'full_pages': 1, 'index_root_page': 299, 'indices': 0, 'level_0': 125, 'level_1': 0, 'level_2': 0,
+                 'max_fragments': 0, 'max_versions': 1, 'name': 'AR', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 297,
+                 'secondary_pages': 2, 'swept_pages': 0, 'table_id': 140, 'total_formats': 1, 'total_fragments': 0, 'total_records': 120,
+                 'total_versions': 105, 'used_formats': 1},
+                {'avg_fill': 8, 'avg_fragment_length': 0.0, 'avg_record_length': 25.94, 'avg_unpacked_length': 34.0,
+                 'avg_version_length': 0.0, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': 1.31,
+                 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0),
+                 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 183, 'indices': 1, 'level_0': None, 'level_1': None, 'level_2': None,
+                 'max_fragments': 0, 'max_versions': 0, 'name': 'COUNTRY', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 182,
+                 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 128, 'total_formats': 1, 'total_fragments': 0, 'total_records': 16,
+                 'total_versions': 0, 'used_formats': 1},
+                {'avg_fill': 26, 'avg_fragment_length': 0.0, 'avg_record_length': 125.47, 'avg_unpacked_length': 241.0,
+                 'avg_version_length': 0.0, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': 1.92,
+                 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0),
+                 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 262, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None,
+                 'max_fragments': 0, 'max_versions': 0, 'name': 'CUSTOMER', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 261,
+                 'secondary_pages': 0,
'swept_pages': 0, 'table_id': 137, 'total_formats': 1, 'total_fragments': 0, 'total_records': 15, + 'total_versions': 0, 'used_formats': 1}, + {'avg_fill': 24, 'avg_fragment_length': 0.0, 'avg_record_length': 74.62, 'avg_unpacked_length': 88.0, + 'avg_version_length': 0.0, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': 1.18, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 199, 'indices': 5, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': 0, 'max_versions': 0, 'name': 'DEPARTMENT', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 198, + 'secondary_pages': 0, 'swept_pages': 1, 'table_id': 130, 'total_formats': 1, 'total_fragments': 0, 'total_records': 21, + 'total_versions': 0, 'used_formats': 1}, + {'avg_fill': 44, 'avg_fragment_length': 0.0, 'avg_record_length': 69.02, 'avg_unpacked_length': 39.0, + 'avg_version_length': 0.0, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': 0.57, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=0, d50=1, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 213, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': 0, 'max_versions': 0, 'name': 'EMPLOYEE', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 212, + 'secondary_pages': 0, 'swept_pages': 1, 'table_id': 131, 'total_formats': 1, 'total_fragments': 0, 'total_records': 42, + 'total_versions': 0, 'used_formats': 1}, + {'avg_fill': 10, 'avg_fragment_length': 0.0, 'avg_record_length': 12.0, 'avg_unpacked_length': 11.0, + 'avg_version_length': 0.0, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': 0.92, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 235, 'indices': 3, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': 0, 'max_versions': 0, 'name': 'EMPLOYEE_PROJECT', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 234, + 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 134, 'total_formats': 1, 'total_fragments': 0, 'total_records': 28, + 'total_versions': 0, 'used_formats': 1}, + {'avg_fill': 54, 'avg_fragment_length': 0.0, 'avg_record_length': 66.13, 'avg_unpacked_length': 96.0, + 'avg_version_length': 0.0, 'blob_pages': 0, 'blobs': 39, 'blobs_total_length': 4840, 'compression_ratio': 1.45, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=1, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 190, 'indices': 4, 'level_0': 39, 'level_1': 0, 'level_2': 0, + 'max_fragments': 0, 'max_versions': 0, 'name': 'JOB', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 189, + 'secondary_pages': 1, 'swept_pages': 1, 'table_id': 129, 'total_formats': 1, 'total_fragments': 0, 'total_records': 31, + 'total_versions': 0, 'used_formats': 1}, + {'avg_fill': 7, 'avg_fragment_length': 0.0, 'avg_record_length': 49.67, 'avg_unpacked_length': 56.0, + 'avg_version_length': 0.0, 'blob_pages': 0, 'blobs': 6, 'blobs_total_length': 548, 'compression_ratio': 1.13, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=2, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 221, 'indices': 4, 
'level_0': 6, 'level_1': 0, 'level_2': 0, + 'max_fragments': 0, 'max_versions': 0, 'name': 'PROJECT', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 220, + 'secondary_pages': 1, 'swept_pages': 1, 'table_id': 133, 'total_formats': 1, 'total_fragments': 0, 'total_records': 6, + 'total_versions': 0, 'used_formats': 1}, + {'avg_fill': 20, 'avg_fragment_length': 0.0, 'avg_record_length': 30.58, 'avg_unpacked_length': 32.0, + 'avg_version_length': 0.0, 'blob_pages': 0, 'blobs': 24, 'blobs_total_length': 1344, 'compression_ratio': 1.05, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=1, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 248, 'indices': 3, 'level_0': 24, 'level_1': 0, 'level_2': 0, + 'max_fragments': 0, 'max_versions': 0, 'name': 'PROJ_DEPT_BUDGET', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 239, + 'secondary_pages': 1, 'swept_pages': 0, 'table_id': 135, 'total_formats': 1, 'total_fragments': 0, 'total_records': 24, + 'total_versions': 0, 'used_formats': 1}, + {'avg_fill': 30, 'avg_fragment_length': 0.0, 'avg_record_length': 33.29, 'avg_unpacked_length': 8.0, + 'avg_version_length': 0.0, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': 0.24, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 254, 'indices': 4, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': 0, 'max_versions': 0, 'name': 'SALARY_HISTORY', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 253, + 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 136, 'total_formats': 1, 'total_fragments': 0, 'total_records': 49, + 'total_versions': 0, 'used_formats': 1}, + {'avg_fill': 35, 'avg_fragment_length': 0.0, 'avg_record_length': 68.82, 'avg_unpacked_length': 8.0, + 'avg_version_length': 0.0, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': 0.12, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=0, d40=1, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 268, 'indices': 6, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': 0, 'max_versions': 0, 'name': 'SALES', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 267, + 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 138, 'total_formats': 1, 'total_fragments': 0, 'total_records': 33, + 'total_versions': 0, 'used_formats': 1}, + {'avg_fill': 0, 'avg_fragment_length': 0.0, 'avg_record_length': 0.0, 'avg_unpacked_length': 0.0, + 'avg_version_length': 0.0, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': 0.0, + 'data_page_slots': 0, 'data_pages': 0, 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 324, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': 0, 'max_versions': 0, 'name': 'T', 'pointer_pages': 1, 'primary_pages': 0, 'primary_pointer_page': 323, + 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 147, 'total_formats': 1, 'total_fragments': 0, 'total_records': 0, + 'total_versions': 0, 'used_formats': 0}, + {'avg_fill': 8, 'avg_fragment_length': 0.0, 'avg_record_length': 0.0, 'avg_unpacked_length': 120.0, + 'avg_version_length': 14.25, 'blob_pages': 0, 'blobs': 3, 'blobs_total_length': 954, 'compression_ratio': 0.0, + 
'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=2, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 303, 'indices': 0, 'level_0': 3, 'level_1': 0, 'level_2': 0, + 'max_fragments': 0, 'max_versions': 1, 'name': 'T2', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 302, + 'secondary_pages': 1, 'swept_pages': 0, 'table_id': 142, 'total_formats': 1, 'total_fragments': 0, 'total_records': 4, + 'total_versions': 4, 'used_formats': 1}, + {'avg_fill': 3, 'avg_fragment_length': 0.0, 'avg_record_length': 0.0, 'avg_unpacked_length': 112.0, + 'avg_version_length': 22.67, 'blob_pages': 0, 'blobs': 2, 'blobs_total_length': 313, 'compression_ratio': 0.0, + 'data_page_slots': 2, 'data_pages': 2, 'distribution': FillDistribution(d20=2, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 306, 'indices': 0, 'level_0': 2, 'level_1': 0, 'level_2': 0, + 'max_fragments': 0, 'max_versions': 1, 'name': 'T3', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 305, + 'secondary_pages': 1, 'swept_pages': 0, 'table_id': 143, 'total_formats': 1, 'total_fragments': 0, 'total_records': 3, + 'total_versions': 3, 'used_formats': 1}, + {'avg_fill': 3, 'avg_fragment_length': 0.0, 'avg_record_length': 0.0, 'avg_unpacked_length': 264.0, + 'avg_version_length': 75.0, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': 0.0, + 'data_page_slots': 1, 'data_pages': 1, 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 308, 'indices': 0, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': 0, 'max_versions': 1, 'name': 'T4', 'pointer_pages': 1, 'primary_pages': 1, 'primary_pointer_page': 307, + 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 144, 'total_formats': 1, 'total_fragments': 0, 'total_records': 2, + 'total_versions': 2, 'used_formats': 1}, + {'avg_fill': 0, 'avg_fragment_length': 0.0, 'avg_record_length': 0.0, 'avg_unpacked_length': 0.0, + 'avg_version_length': 0.0, 'blob_pages': None, 'blobs': None, 'blobs_total_length': None, 'compression_ratio': 0.0, + 'data_page_slots': 0, 'data_pages': 0, 'distribution': FillDistribution(d20=0, d40=0, d50=0, d80=0, d100=0), + 'empty_pages': 0, 'full_pages': 0, 'index_root_page': 316, 'indices': 1, 'level_0': None, 'level_1': None, 'level_2': None, + 'max_fragments': 0, 'max_versions': 0, 'name': 'T5', 'pointer_pages': 1, 'primary_pages': 0, 'primary_pointer_page': 315, + 'secondary_pages': 0, 'swept_pages': 0, 'table_id': 145, 'total_formats': 1, 'total_fragments': 0, 'total_records': 0, + 'total_versions': 0, 'used_formats': 0}] + i = 0 + while i < len(db.tables): + self.assertDictEqual(data[i], get_object_data(db.tables[i]), 'Unexpected output from parser (tables)') + i += 1 + # Indices + data = [{'avg_data_length': 6.44, 'avg_key_length': 8.63, 'avg_node_length': 10.44, 'avg_prefix_length': 0.44, + 'clustering_factor': 1.0, 'compression_ratio': 0.8, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY1', 'nodes': 16, 'ratio': 0.06, 'root_page': 186, 'total_dup': 0}, + {'avg_data_length': 15.87, 'avg_key_length': 18.27, 'avg_node_length': 19.87, 'avg_prefix_length': 0.6, + 'clustering_factor': 1.0, 'compression_ratio': 0.9, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 
'leaf_buckets': 1, 'max_dup': 0, + 'name': 'CUSTNAMEX', 'nodes': 15, 'ratio': 0.07, 'root_page': 276, 'total_dup': 0}, + {'avg_data_length': 17.27, 'avg_key_length': 20.2, 'avg_node_length': 21.27, 'avg_prefix_length': 2.33, + 'clustering_factor': 1.0, 'compression_ratio': 0.97, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'CUSTREGION', 'nodes': 15, 'ratio': 0.07, 'root_page': 283, 'total_dup': 0}, + {'avg_data_length': 4.87, 'avg_key_length': 6.93, 'avg_node_length': 8.6, 'avg_prefix_length': 0.87, + 'clustering_factor': 1.0, 'compression_ratio': 0.83, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN23', 'nodes': 15, 'ratio': 0.07, 'root_page': 264, 'total_dup': 4}, + {'avg_data_length': 1.13, 'avg_key_length': 3.13, 'avg_node_length': 4.2, 'avg_prefix_length': 1.87, + 'clustering_factor': 1.0, 'compression_ratio': 0.96, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY22', 'nodes': 15, 'ratio': 0.07, 'root_page': 263, 'total_dup': 0}, + {'avg_data_length': 5.38, 'avg_key_length': 8.0, 'avg_node_length': 9.05, 'avg_prefix_length': 3.62, + 'clustering_factor': 1.0, 'compression_ratio': 1.13, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 3, + 'name': 'BUDGETX', 'nodes': 21, 'ratio': 0.05, 'root_page': 284, 'total_dup': 7}, + {'avg_data_length': 13.95, 'avg_key_length': 16.57, 'avg_node_length': 17.95, 'avg_prefix_length': 5.29, + 'clustering_factor': 1.0, 'compression_ratio': 1.16, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$4', 'nodes': 21, 'ratio': 0.05, 'root_page': 208, 'total_dup': 0}, + {'avg_data_length': 1.14, 'avg_key_length': 3.24, 'avg_node_length': 4.29, 'avg_prefix_length': 0.81, + 'clustering_factor': 1.0, 'compression_ratio': 0.6, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, 'leaf_buckets': 1, 'max_dup': 3, + 'name': 'RDB$FOREIGN10', 'nodes': 21, 'ratio': 0.05, 'root_page': 219, 'total_dup': 3}, + {'avg_data_length': 0.81, 'avg_key_length': 2.95, 'avg_node_length': 4.1, 'avg_prefix_length': 2.05, + 'clustering_factor': 1.0, 'compression_ratio': 0.97, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN6', 'nodes': 21, 'ratio': 0.05, 'root_page': 210, 'total_dup': 13}, + {'avg_data_length': 1.71, 'avg_key_length': 4.05, 'avg_node_length': 5.24, 'avg_prefix_length': 1.29, + 'clustering_factor': 1.0, 'compression_ratio': 0.74, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY5', 'nodes': 21, 'ratio': 0.05, 'root_page': 209, 'total_dup': 0}, + {'avg_data_length': 15.52, 'avg_key_length': 18.5, 'avg_node_length': 19.52, 'avg_prefix_length': 2.17, + 'clustering_factor': 1.0, 'compression_ratio': 0.96, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'NAMEX', 'nodes': 42, 'ratio': 0.02, 'root_page': 285, 'total_dup': 0}, + {'avg_data_length': 0.81, 
'avg_key_length': 2.98, 'avg_node_length': 4.07, 'avg_prefix_length': 2.19, + 'clustering_factor': 1.0, 'compression_ratio': 1.01, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN8', 'nodes': 42, 'ratio': 0.02, 'root_page': 215, 'total_dup': 23}, + {'avg_data_length': 6.79, 'avg_key_length': 9.4, 'avg_node_length': 10.43, 'avg_prefix_length': 9.05, + 'clustering_factor': 1.0, 'compression_ratio': 1.68, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 4, + 'name': 'RDB$FOREIGN9', 'nodes': 42, 'ratio': 0.02, 'root_page': 216, 'total_dup': 15}, + {'avg_data_length': 1.31, 'avg_key_length': 3.6, 'avg_node_length': 4.62, 'avg_prefix_length': 1.17, + 'clustering_factor': 1.0, 'compression_ratio': 0.69, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY7', 'nodes': 42, 'ratio': 0.02, 'root_page': 214, 'total_dup': 0}, + {'avg_data_length': 1.04, 'avg_key_length': 3.25, 'avg_node_length': 4.29, 'avg_prefix_length': 1.36, + 'clustering_factor': 1.0, 'compression_ratio': 0.74, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 2, + 'name': 'RDB$FOREIGN15', 'nodes': 28, 'ratio': 0.04, 'root_page': 237, 'total_dup': 6}, + {'avg_data_length': 0.86, 'avg_key_length': 2.89, 'avg_node_length': 4.04, 'avg_prefix_length': 4.14, + 'clustering_factor': 1.0, 'compression_ratio': 1.73, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 9, + 'name': 'RDB$FOREIGN16', 'nodes': 28, 'ratio': 0.04, 'root_page': 238, 'total_dup': 23}, + {'avg_data_length': 9.11, 'avg_key_length': 12.07, 'avg_node_length': 13.11, 'avg_prefix_length': 2.89, + 'clustering_factor': 1.0, 'compression_ratio': 0.99, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY14', 'nodes': 28, 'ratio': 0.04, 'root_page': 236, 'total_dup': 0}, + {'avg_data_length': 10.9, 'avg_key_length': 13.71, 'avg_node_length': 14.74, 'avg_prefix_length': 7.87, + 'clustering_factor': 1.0, 'compression_ratio': 1.37, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 1, + 'name': 'MAXSALX', 'nodes': 31, 'ratio': 0.03, 'root_page': 286, 'total_dup': 5}, + {'avg_data_length': 10.29, 'avg_key_length': 13.03, 'avg_node_length': 14.06, 'avg_prefix_length': 8.48, + 'clustering_factor': 1.0, 'compression_ratio': 1.44, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 2, + 'name': 'MINSALX', 'nodes': 31, 'ratio': 0.03, 'root_page': 287, 'total_dup': 7}, + {'avg_data_length': 1.39, 'avg_key_length': 3.39, 'avg_node_length': 4.61, 'avg_prefix_length': 2.77, + 'clustering_factor': 1.0, 'compression_ratio': 1.23, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 20, + 'name': 'RDB$FOREIGN3', 'nodes': 31, 'ratio': 0.03, 'root_page': 192, 'total_dup': 24}, + {'avg_data_length': 10.45, 'avg_key_length': 13.42, 'avg_node_length': 14.45, 'avg_prefix_length': 6.19, + 'clustering_factor': 1.0, 'compression_ratio': 
1.24, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY2', 'nodes': 31, 'ratio': 0.03, 'root_page': 191, 'total_dup': 0}, + {'avg_data_length': 22.5, 'avg_key_length': 25.33, 'avg_node_length': 26.5, 'avg_prefix_length': 4.17, + 'clustering_factor': 1.0, 'compression_ratio': 1.05, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'PRODTYPEX', 'nodes': 6, 'ratio': 0.17, 'root_page': 288, 'total_dup': 0}, + {'avg_data_length': 13.33, 'avg_key_length': 15.5, 'avg_node_length': 17.33, 'avg_prefix_length': 0.33, + 'clustering_factor': 1.0, 'compression_ratio': 0.88, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$11', 'nodes': 6, 'ratio': 0.17, 'root_page': 222, 'total_dup': 0}, + {'avg_data_length': 1.33, 'avg_key_length': 3.5, 'avg_node_length': 4.67, 'avg_prefix_length': 0.67, + 'clustering_factor': 1.0, 'compression_ratio': 0.57, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$FOREIGN13', 'nodes': 6, 'ratio': 0.17, 'root_page': 232, 'total_dup': 0}, + {'avg_data_length': 4.83, 'avg_key_length': 7.0, 'avg_node_length': 8.83, 'avg_prefix_length': 0.17, + 'clustering_factor': 1.0, 'compression_ratio': 0.71, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY12', 'nodes': 6, 'ratio': 0.17, 'root_page': 223, 'total_dup': 0}, + {'avg_data_length': 0.71, 'avg_key_length': 2.79, 'avg_node_length': 3.92, 'avg_prefix_length': 2.29, + 'clustering_factor': 1.0, 'compression_ratio': 1.07, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 5, + 'name': 'RDB$FOREIGN18', 'nodes': 24, 'ratio': 0.04, 'root_page': 250, 'total_dup': 15}, + {'avg_data_length': 1.0, 'avg_key_length': 3.04, 'avg_node_length': 4.21, 'avg_prefix_length': 4.0, + 'clustering_factor': 1.0, 'compression_ratio': 1.64, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 8, + 'name': 'RDB$FOREIGN19', 'nodes': 24, 'ratio': 0.04, 'root_page': 251, 'total_dup': 19}, + {'avg_data_length': 6.83, 'avg_key_length': 9.67, 'avg_node_length': 10.71, 'avg_prefix_length': 12.17, + 'clustering_factor': 1.0, 'compression_ratio': 1.97, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0, + 'name': 'RDB$PRIMARY17', 'nodes': 24, 'ratio': 0.04, 'root_page': 249, 'total_dup': 0}, + {'avg_data_length': 0.31, 'avg_key_length': 2.35, 'avg_node_length': 3.37, 'avg_prefix_length': 6.69, + 'clustering_factor': 1.0, 'compression_ratio': 2.98, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 21, + 'name': 'CHANGEX', 'nodes': 49, 'ratio': 0.02, 'root_page': 289, 'total_dup': 46}, + {'avg_data_length': 0.9, 'avg_key_length': 3.1, 'avg_node_length': 4.12, 'avg_prefix_length': 1.43, + 'clustering_factor': 1.0, 'compression_ratio': 0.75, 'depth': 1, + 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 2, + 
'name': 'RDB$FOREIGN21', 'nodes': 49, 'ratio': 0.02, 'root_page': 256, 'total_dup': 16},
+                {'avg_data_length': 18.29, 'avg_key_length': 21.27, 'avg_node_length': 22.29, 'avg_prefix_length': 4.31,
+                 'clustering_factor': 1.0, 'compression_ratio': 1.06, 'depth': 1,
+                 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0,
+                 'name': 'RDB$PRIMARY20', 'nodes': 49, 'ratio': 0.02, 'root_page': 255, 'total_dup': 0},
+                {'avg_data_length': 0.29, 'avg_key_length': 2.29, 'avg_node_length': 3.35, 'avg_prefix_length': 5.39,
+                 'clustering_factor': 1.0, 'compression_ratio': 2.48, 'depth': 1,
+                 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 28,
+                 'name': 'UPDATERX', 'nodes': 49, 'ratio': 0.02, 'root_page': 290, 'total_dup': 46},
+                {'avg_data_length': 2.55, 'avg_key_length': 4.94, 'avg_node_length': 5.97, 'avg_prefix_length': 2.88,
+                 'clustering_factor': 1.0, 'compression_ratio': 1.1, 'depth': 1,
+                 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 3, 'leaf_buckets': 1, 'max_dup': 6,
+                 'name': 'NEEDX', 'nodes': 33, 'ratio': 0.03, 'root_page': 291, 'total_dup': 11},
+                {'avg_data_length': 1.85, 'avg_key_length': 4.03, 'avg_node_length': 5.06, 'avg_prefix_length': 11.18,
+                 'clustering_factor': 1.0, 'compression_ratio': 3.23, 'depth': 1,
+                 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 4, 'leaf_buckets': 1, 'max_dup': 3,
+                 'name': 'QTYX', 'nodes': 33, 'ratio': 0.03, 'root_page': 292, 'total_dup': 11},
+                {'avg_data_length': 0.52, 'avg_key_length': 2.52, 'avg_node_length': 3.55, 'avg_prefix_length': 2.48,
+                 'clustering_factor': 1.0, 'compression_ratio': 1.19, 'depth': 1,
+                 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 1, 'leaf_buckets': 1, 'max_dup': 4,
+                 'name': 'RDB$FOREIGN25', 'nodes': 33, 'ratio': 0.03, 'root_page': 270, 'total_dup': 18},
+                {'avg_data_length': 0.45, 'avg_key_length': 2.64, 'avg_node_length': 3.67, 'avg_prefix_length': 2.21,
+                 'clustering_factor': 1.0, 'compression_ratio': 1.01, 'depth': 1,
+                 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 2, 'leaf_buckets': 1, 'max_dup': 7,
+                 'name': 'RDB$FOREIGN26', 'nodes': 33, 'ratio': 0.03, 'root_page': 271, 'total_dup': 25},
+                {'avg_data_length': 4.48, 'avg_key_length': 7.42, 'avg_node_length': 8.45, 'avg_prefix_length': 3.52,
+                 'clustering_factor': 1.0, 'compression_ratio': 1.08, 'depth': 1,
+                 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0,
+                 'name': 'RDB$PRIMARY24', 'nodes': 33, 'ratio': 0.03, 'root_page': 269, 'total_dup': 0},
+                {'avg_data_length': 0.97, 'avg_key_length': 3.03, 'avg_node_length': 4.06, 'avg_prefix_length': 9.82,
+                 'clustering_factor': 1.0, 'compression_ratio': 3.56, 'depth': 1,
+                 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 5, 'leaf_buckets': 1, 'max_dup': 14,
+                 'name': 'SALESTATX', 'nodes': 33, 'ratio': 0.03, 'root_page': 293, 'total_dup': 27},
+                {'avg_data_length': 0.0, 'avg_key_length': 0.0, 'avg_node_length': 0.0, 'avg_prefix_length': 0.0,
+                 'clustering_factor': 0.0, 'compression_ratio': 0.0, 'depth': 1,
+                 'distribution': FillDistribution(d20=1, d40=0, d50=0, d80=0, d100=0), 'index_id': 0, 'leaf_buckets': 1, 'max_dup': 0,
+                 'name': 'RDB$PRIMARY28', 'nodes': 0, 'ratio': 0.0, 'root_page': 317, 'total_dup': 0}]
+        i = 0
+        while i < len(db.indices):
+            self.assertDictEqual(data[i], get_object_data(db.indices[i], ['table']), 'Unexpected output from parser (indices)')
+            i += 1
+    def test_parse30_s(self):
+        db = self._parse_file(os.path.join(self.dbpath, 'gstat30-s.out'))
+        #
+        self.assertTrue(db.has_table_stats())
+        self.assertTrue(db.has_index_stats())
+        self.assertFalse(db.has_row_stats())
+        self.assertFalse(db.has_encryption_stats())
+        self.assertTrue(db.has_system())
+        # Check system tables
+        data = ['RDB$AUTH_MAPPING', 'RDB$BACKUP_HISTORY', 'RDB$CHARACTER_SETS', 'RDB$CHECK_CONSTRAINTS', 'RDB$COLLATIONS', 'RDB$DATABASE',
+                'RDB$DB_CREATORS', 'RDB$DEPENDENCIES', 'RDB$EXCEPTIONS', 'RDB$FIELDS', 'RDB$FIELD_DIMENSIONS', 'RDB$FILES', 'RDB$FILTERS',
+                'RDB$FORMATS', 'RDB$FUNCTIONS', 'RDB$FUNCTION_ARGUMENTS', 'RDB$GENERATORS', 'RDB$INDEX_SEGMENTS', 'RDB$INDICES',
+                'RDB$LOG_FILES', 'RDB$PACKAGES', 'RDB$PAGES', 'RDB$PROCEDURES', 'RDB$PROCEDURE_PARAMETERS', 'RDB$REF_CONSTRAINTS',
+                'RDB$RELATIONS', 'RDB$RELATION_CONSTRAINTS', 'RDB$RELATION_FIELDS', 'RDB$ROLES', 'RDB$SECURITY_CLASSES', 'RDB$TRANSACTIONS',
+                'RDB$TRIGGERS', 'RDB$TRIGGER_MESSAGES', 'RDB$TYPES', 'RDB$USER_PRIVILEGES', 'RDB$VIEW_RELATIONS']
+        for table in db.tables:
+            if table.name.startswith('RDB$'):
+                self.assertIn(table.name, data)
+        # check system indices
+        data = ['RDB$PRIMARY1', 'RDB$FOREIGN23', 'RDB$PRIMARY22', 'RDB$4', 'RDB$FOREIGN10', 'RDB$FOREIGN6', 'RDB$PRIMARY5', 'RDB$FOREIGN8',
+                'RDB$FOREIGN9', 'RDB$PRIMARY7', 'RDB$FOREIGN15', 'RDB$FOREIGN16', 'RDB$PRIMARY14', 'RDB$FOREIGN3', 'RDB$PRIMARY2', 'RDB$11',
+                'RDB$FOREIGN13', 'RDB$PRIMARY12', 'RDB$FOREIGN18', 'RDB$FOREIGN19', 'RDB$PRIMARY17', 'RDB$INDEX_52', 'RDB$INDEX_44',
+                'RDB$INDEX_19', 'RDB$INDEX_25', 'RDB$INDEX_14', 'RDB$INDEX_40', 'RDB$INDEX_20', 'RDB$INDEX_26', 'RDB$INDEX_27',
+                'RDB$INDEX_28', 'RDB$INDEX_23', 'RDB$INDEX_24', 'RDB$INDEX_2', 'RDB$INDEX_36', 'RDB$INDEX_17', 'RDB$INDEX_45',
+                'RDB$INDEX_16', 'RDB$INDEX_53', 'RDB$INDEX_9', 'RDB$INDEX_10', 'RDB$INDEX_49', 'RDB$INDEX_51', 'RDB$INDEX_11',
+                'RDB$INDEX_46', 'RDB$INDEX_6', 'RDB$INDEX_31', 'RDB$INDEX_41', 'RDB$INDEX_5', 'RDB$INDEX_47', 'RDB$INDEX_21', 'RDB$INDEX_22',
+                'RDB$INDEX_18', 'RDB$INDEX_48', 'RDB$INDEX_50', 'RDB$INDEX_13', 'RDB$INDEX_0', 'RDB$INDEX_1', 'RDB$INDEX_12', 'RDB$INDEX_42',
+                'RDB$INDEX_43', 'RDB$INDEX_15', 'RDB$INDEX_3', 'RDB$INDEX_4', 'RDB$INDEX_39', 'RDB$INDEX_7', 'RDB$INDEX_32', 'RDB$INDEX_38',
+                'RDB$INDEX_8', 'RDB$INDEX_35', 'RDB$INDEX_37', 'RDB$INDEX_29', 'RDB$INDEX_30', 'RDB$INDEX_33', 'RDB$INDEX_34',
+                'RDB$FOREIGN21', 'RDB$PRIMARY20', 'RDB$FOREIGN25', 'RDB$FOREIGN26', 'RDB$PRIMARY24', 'RDB$PRIMARY28']
+        for index in db.indices:
+            if index.name.startswith('RDB$'):
+                self.assertIn(index.name, data)
+
+class TestLogParse(FDBTestBase):
+    def setUp(self):
+        super(TestLogParse, self).setUp()
+    def _check_events(self, trace_lines, output):
+        for obj in log.parse(linesplit_iter(trace_lines)):
+            self.printout(str(obj))
+        self.assertEqual(self.output.getvalue(), output, "Parsed events do not match expected ones")
+    def test_locale(self):
+        data = """
+
+SRVDB1 Tue Apr 04 21:25:40 2017
+	INET/inet_error: read errno = 10054
+
+"""
+        output = """LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 25, 40), message='INET/inet_error: read errno = 10054')
+"""
+        locale = getlocale(LC_ALL)
+        if locale[0] is None:
+            setlocale(LC_ALL, '')
+            locale = getlocale(LC_ALL)
+        try:
+            self._check_events(data, output)
+            self.assertEqual(locale, getlocale(LC_ALL), "Locale must not change")
+            self.clear_output()
+            if sys.platform == 'win32':
+                setlocale(LC_ALL, 'Czech_Czech Republic')
+            else:
+                setlocale(LC_ALL, 'cs_CZ')
+            nlocale = getlocale(LC_ALL)
+            self._check_events(data, output)
+            self.assertEqual(nlocale, getlocale(LC_ALL), "Locale must not change")
+        finally:
+            setlocale(LC_ALL, locale)
+    def test_windows_service(self):
+        data = """
+
+SRVDB1 Tue Apr 04 21:25:40 2017
+	INET/inet_error: read errno = 10054
+
+
+SRVDB1 Tue Apr 04 21:25:41 2017
+	Unable to complete network request to host "SRVDB1".
+	Error reading data from the connection.
+
+
+SRVDB1 Tue Apr 04 21:25:42 2017
+	INET/inet_error: read errno = 10054
+
+
+SRVDB1 Tue Apr 04 21:25:43 2017
+	Unable to complete network request to host "SRVDB1".
+	Error reading data from the connection.
+
+
+SRVDB1 Tue Apr 04 21:28:48 2017
+	INET/inet_error: read errno = 10054
+
+
+SRVDB1 Tue Apr 04 21:28:50 2017
+	Unable to complete network request to host "SRVDB1".
+	Error reading data from the connection.
+
+
+SRVDB1 Tue Apr 04 21:28:51 2017
+	Sweep is started by SYSDBA
+	Database "Mydatabase"
+	OIT 551120654, OAT 551120655, OST 551120655, Next 551121770
+
+
+SRVDB1 Tue Apr 04 21:28:52 2017
+	INET/inet_error: read errno = 10054
+
+
+SRVDB1 Tue Apr 04 21:28:53 2017
+	Unable to complete network request to host "SRVDB1".
+	Error reading data from the connection.
+
+
+SRVDB1 Tue Apr 04 21:28:54 2017
+	Sweep is finished
+	Database "Mydatabase"
+	OIT 551234848, OAT 551234849, OST 551234849, Next 551235006
+
+
+SRVDB1 Tue Apr 04 21:28:55 2017
+	Sweep is started by SWEEPER
+	Database "Mydatabase"
+	OIT 551243753, OAT 551846279, OST 551846279, Next 551846385
+
+
+SRVDB1 Tue Apr 04 21:28:56 2017
+	INET/inet_error: read errno = 10054
+
+
+SRVDB1 Tue Apr 04 21:28:57 2017
+	Sweep is finished
+	Database "Mydatabase"
+	OIT 551846278, OAT 551976724, OST 551976724, Next 551976730
+
+
+SRVDB1 Tue Apr 04 21:28:58 2017
+	Unable to complete network request to host "(unknown)".
+	Error reading data from the connection.
+
+
+SRVDB1 Thu Apr 06 12:52:56 2017
+	Shutting down the server with 1 active connection(s) to 1 database(s), 0 active service(s)
+
+
+"""
+        output = """LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 25, 40), message='INET/inet_error: read errno = 10054')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 25, 41), message='Unable to complete network request to host "SRVDB1".\\nError reading data from the connection.')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 25, 42), message='INET/inet_error: read errno = 10054')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 25, 43), message='Unable to complete network request to host "SRVDB1".\\nError reading data from the connection.')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 28, 48), message='INET/inet_error: read errno = 10054')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 28, 50), message='Unable to complete network request to host "SRVDB1".\\nError reading data from the connection.')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 28, 51), message='Sweep is started by SYSDBA\\nDatabase "Mydatabase"\\nOIT 551120654, OAT 551120655, OST 551120655, Next 551121770')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 28, 52), message='INET/inet_error: read errno = 10054')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 28, 53), message='Unable to complete network request to host "SRVDB1".\\nError reading data from the connection.')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 28, 54), message='Sweep is finished\\nDatabase "Mydatabase"\\nOIT 551234848, OAT 551234849, OST 551234849, Next 551235006')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 28, 55), message='Sweep is started by SWEEPER\\nDatabase "Mydatabase"\\nOIT 551243753, OAT 551846279, OST 551846279, Next 551846385')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 28, 56), message='INET/inet_error: read errno = 10054')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 28, 57), message='Sweep is finished\\nDatabase "Mydatabase"\\nOIT 551846278, OAT 551976724, OST 551976724, Next 551976730')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 4, 21, 28, 58), message='Unable to complete network request to host "(unknown)".\\nError reading data from the connection.')
+LogEntry(source_id='SRVDB1', timestamp=datetime.datetime(2017, 4, 6, 12, 52, 56), message='Shutting down the server with 1 active connection(s) to 1 database(s), 0 active service(s)')
+"""
+        self._check_events(data, output)
+    def test_linux_direct(self):
+        data = """
+MyServer (Client) Fri Apr 6 16:35:46 2018
+	INET/inet_error: connect errno = 111
+
+
+MyServer (Client) Fri Apr 6 16:51:31 2018
+	/opt/firebird/bin/fbguard: guardian starting /opt/firebird/bin/fbserver
+
+
+
+MyServer (Server) Fri Apr 6 16:55:23 2018
+	activating shadow file /home/db/test_employee.fdb
+
+
+MyServer (Server) Fri Apr 6 16:55:31 2018
+	Sweep is started by SYSDBA
+	Database "/home/db/test_employee.fdb"
+	OIT 1, OAT 0, OST 0, Next 1
+
+
+MyServer (Server) Fri Apr 6 16:55:31 2018
+	Sweep is finished
+	Database "/home/db/test_employee.fdb"
+	OIT 1, OAT 0, OST 0, Next 2
+
+
+MyServer (Client) Fri Apr 6 20:18:52 2018
+	/opt/firebird/bin/fbguard: /opt/firebird/bin/fbserver normal shutdown.
+ + + +MyServer (Client) Mon Apr 9 08:28:29 2018 + /opt/firebird/bin/fbguard: guardian starting /opt/firebird/bin/fbserver + + + +MyServer (Server) Tue Apr 17 15:01:27 2018 + INET/inet_error: invalid socket in packet_receive errno = 22 + + +MyServer (Client) Tue Apr 17 19:42:55 2018 + /opt/firebird/bin/fbguard: /opt/firebird/bin/fbserver normal shutdown. + + + +""" + output = """LogEntry(source_id='MyServer (Client)', timestamp=datetime.datetime(2018, 4, 6, 16, 35, 46), message='INET/inet_error: connect errno = 111') +LogEntry(source_id='MyServer (Client)', timestamp=datetime.datetime(2018, 4, 6, 16, 51, 31), message='/opt/firebird/bin/fbguard: guardian starting /opt/firebird/bin/fbserver') +LogEntry(source_id='MyServer (Server)', timestamp=datetime.datetime(2018, 4, 6, 16, 55, 23), message='activating shadow file /home/db/test_employee.fdb') +LogEntry(source_id='MyServer (Server)', timestamp=datetime.datetime(2018, 4, 6, 16, 55, 31), message='Sweep is started by SYSDBA\\nDatabase "/home/db/test_employee.fdb"\\nOIT 1, OAT 0, OST 0, Next 1') +LogEntry(source_id='MyServer (Server)', timestamp=datetime.datetime(2018, 4, 6, 16, 55, 31), message='Sweep is finished\\nDatabase "/home/db/test_employee.fdb"\\nOIT 1, OAT 0, OST 0, Next 2') +LogEntry(source_id='MyServer (Client)', timestamp=datetime.datetime(2018, 4, 6, 20, 18, 52), message='/opt/firebird/bin/fbguard: /opt/firebird/bin/fbserver normal shutdown.') +LogEntry(source_id='MyServer (Client)', timestamp=datetime.datetime(2018, 4, 9, 8, 28, 29), message='/opt/firebird/bin/fbguard: guardian starting /opt/firebird/bin/fbserver') +LogEntry(source_id='MyServer (Server)', timestamp=datetime.datetime(2018, 4, 17, 15, 1, 27), message='INET/inet_error: invalid socket in packet_receive errno = 22') +LogEntry(source_id='MyServer (Client)', timestamp=datetime.datetime(2018, 4, 17, 19, 42, 55), message='/opt/firebird/bin/fbguard: /opt/firebird/bin/fbserver normal shutdown.') +""" + self._check_events(data, output) + if __name__ == '__main__': unittest.main()